import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """Descriptor that mimics @property but caches output in a member variable."""

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
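# Illustrative usage (not part of the original file): the wrapped method runs once per
# instance, and subsequent accesses return the memoized value.
#
#     class Dataset:
#         @cached_property
#         def stats(self):
#             return expensive_scan(self.path)  # hypothetical helper; runs on first access only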
def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def is_tensor(x):
    """Tests whether `x` is a `torch.Tensor`, `tf.Tensor`, `jnp.ndarray` or `np.ndarray`."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Tests whether `x` is a numpy array or not."""
    return _is_numpy(x)
def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Allows indexing by integer or string key, where string
    keys ignore attributes that are `None`.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        """Convert self to a tuple containing all the attributes/keys that are not `None`."""
        return tuple(self[k] for k in self.keys())
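# Illustrative usage (not part of the original file): subclasses are dataclasses whose
# non-None fields can be read by attribute, by string key or by integer index.
#
#     @dataclass
#     class MyOutput(ModelOutput):
#         logits: "torch.Tensor" = None
#         hidden_states: "torch.Tensor" = None
#
#     out = MyOutput(logits=torch.ones(2))
#     out.logits, out["logits"], out[0]   # all refer to the same tensor
#     out.to_tuple()                      # (logits,) -- unset fields are skipped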
class ExplicitEnum(str, Enum):
    """Enum with a more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    """Wrapper around `contextlib.ExitStack` that enters a collection of context managers."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
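# Illustrative usage (not part of the original file): a variable number of context
# managers can be entered through a single `with` statement.
#
#     cms = [torch.no_grad()] if disable_grad else []
#     with ContextManagers(cms):
#         outputs = model(**inputs)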
def can_return_loss(model_class):
    """Check whether a given model class can return loss."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """Find the label arguments used by a given model class."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single-level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
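# Illustrative usage (not part of the original file):
#
#     flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}})
#     # -> {"a": 1, "b.c": 2, "b.d.e": 3}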
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """Framework-agnostic version of `numpy.transpose` for numpy, torch, TF and JAX tensors."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic version of `numpy.reshape`."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic version of `numpy.squeeze`."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic version of `numpy.expand_dims`."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic version of `numpy.size`."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """
    Infers the framework of a given model class without relying on `isinstance`, so it works even when the
    corresponding framework classes cannot be imported.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )
    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with the given input with regard to the model."""
        return True
    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}
    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
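# Illustrative usage (not part of the original file): these classes are normally
# obtained through the `pipeline()` factory rather than constructed directly.
#
#     from transformers import pipeline
#
#     summarizer = pipeline("summarization")
#     summarizer("A long article ...", max_length=60)
#
#     translator = pipeline("translation_en_to_fr")
#     translator("How old are you?")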
"""Convert a fairseq/metaseq OPT checkpoint to the Hugging Face format."""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has the QKV weight separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
A = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
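# Illustrative invocation (not part of the original file; the script name and all paths
# are placeholders):
#
#     python convert_opt_checkpoint.py \
#         --fairseq_path /path/to/restored.pt \
#         --pytorch_dump_folder_path ./opt-converted \
#         --hf_config facebook/opt-350m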
"""SegFormer model configuration."""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
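# Illustrative usage (not part of the original file): the config is built like any
# other `PretrainedConfig`, with per-stage lists for the hierarchical encoder.
#
#     config = SegformerConfig(num_encoder_blocks=4, depths=[2, 2, 2, 2])
#     config.hidden_sizes   # [32, 64, 160, 256] (the defaults above)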
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViTMAE does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    # overwrite from common since ViTMAEForPretraining has random masking, so we need to fix the
    # noise used to generate masks during the test
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""
Implementation of odd-even transposition sort: each list element is handled by its own
process, and neighbouring processes repeatedly exchange and order their values.
"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """
    The function run by each process: `position` is the index this process represents,
    `value` its initial list value; the remaining arguments are the pipes used to talk
    to the left/right neighbors and to report the final value back to main.
    """
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    """Sorts `arr` by spawning one process per element."""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
    main()
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
        "artist": "Zac Brown Band",
        "genres": "Country",
        "lyrics": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
    @require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
    @require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d
    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.upper()] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer
def UpperCAmelCase__ ( self : Any , _A : int , _A : Tuple ):
"""simple docstring"""
with open(F'''{file_name}''' , '''w''' ) as stream:
dump(_A , _A )
def UpperCAmelCase__ ( self : Optional[Any] , _A : List[Any] , _A : Tuple ):
"""simple docstring"""
with open(F'''{file_name}''' , '''w''' ) as stream:
json.dump(_A , _A )
@staticmethod
def UpperCAmelCase__ ( _A : List[Any] ):
"""simple docstring"""
with open(_A ) as stream:
__SCREAMING_SNAKE_CASE : Optional[Any] = load(_A , Loader=_A )
return data
def __str__( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = ''' '''
if self._name != "root":
__SCREAMING_SNAKE_CASE : Optional[Any] = F'''{t * (self._level-1)}{self._name}:\n'''
else:
__SCREAMING_SNAKE_CASE : Optional[int] = ''''''
__SCREAMING_SNAKE_CASE : str = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(_A , _A ):
r += F'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += F'''{t * (self._level)}{k}: {v} ({type(_A ).__name__})\n'''
__SCREAMING_SNAKE_CASE : Optional[Any] = level
return r[:-1]
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , _A : str , **_A : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = cls.get_config_dict(_A , **_A )
return cls(_A )
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] , _A : str , **_A : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = kwargs.pop('''cache_dir''' , _A )
__SCREAMING_SNAKE_CASE : str = kwargs.pop('''force_download''' , _A )
__SCREAMING_SNAKE_CASE : Any = kwargs.pop('''resume_download''' , _A )
__SCREAMING_SNAKE_CASE : Tuple = kwargs.pop('''proxies''' , _A )
__SCREAMING_SNAKE_CASE : Any = kwargs.pop('''local_files_only''' , _A )
if os.path.isdir(_A ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(_A , _A )
elif os.path.isfile(_A ) or is_remote_url(_A ):
__SCREAMING_SNAKE_CASE : Dict = pretrained_model_name_or_path
else:
__SCREAMING_SNAKE_CASE : List[str] = hf_bucket_url(_A , filename=_A , use_cdn=_A )
try:
# Load from URL or cache if already cached
__SCREAMING_SNAKE_CASE : str = cached_path(
_A , cache_dir=_A , force_download=_A , proxies=_A , resume_download=_A , local_files_only=_A , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__SCREAMING_SNAKE_CASE : str = Config.load_yaml(_A )
except EnvironmentError:
__SCREAMING_SNAKE_CASE : Optional[int] = '''Can\'t load config for'''
raise EnvironmentError(_A )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(_A ), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
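# A hedged sketch of the two URL layouts hf_bucket_url produces; the model ids
# below are hypothetical examples, not real repositories.
#
#     >>> hf_bucket_url("demo-model", filename="config.yaml", use_cdn=True)
#     'https://cdn.huggingface.co/demo-model-config.yaml'
#     >>> hf_bucket_url("demo-org/demo-model", filename="config.yaml", use_cdn=False)
#     'https://s3.amazonaws.com/models.huggingface.co/bert/demo-org/demo-model/config.yaml'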
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )

            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"
    return filename
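# Hedged sketch of the cache-naming scheme above: the filename is the sha256
# hex digest of the URL (64 characters), optionally suffixed with a "." and
# the sha256 digest of the ETag. The URL is a hypothetical example.
#
#     >>> name = url_to_filename("https://example.com/model.bin", etag='"abc"')
#     >>> parts = name.split(".")
#     >>> len(parts[0]) == 64 and len(parts[1]) == 64
#     True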
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path
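# Hedged usage sketch of cached_path (requires network access; the URL is a
# hypothetical example): the first call downloads into TRANSFORMERS_CACHE and
# later calls return the cached copy unless force_download=True.
#
#     >>> local_file = cached_path("https://cdn.huggingface.co/demo-model-config.yaml")  # doctest: +SKIP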
def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data)
            except Exception:
                data = data.split("\n")
        req.close()
    return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def get_demo_path():
    print(f"{os.path.abspath(os.path.join(__file__, os.pardir))}/demo.ipynb")
def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
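# Quick sketch of the batching helper above:
#
#     >>> list(chunk([1, 2, 3, 4, 5], batch=2))
#     [[1, 2], [3, 4], [5]]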
"""Find the 1-indexed line whose comma-separated base,exponent pair encodes the largest value."""
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    largest: float = 0
    result: int = 0

    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1

    return result


if __name__ == "__main__":
    print(solution())
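# Why the logarithm trick above works: for positive bases, a**b > c**d exactly
# when b*log10(a) > d*log10(c), so the huge powers never need to be computed.
# A small self-contained check of that equivalence:
#
#     >>> from math import log10
#     >>> (2**11 > 3**7) == (11 * log10(2) > 7 * log10(3))
#     True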
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        # NOTE: the original set contains several distinct Unicode space characters
        # that may have been normalized to plain spaces in this copy.
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        # Overridden to disable the default clean-up.
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False

        out_string += self.sp_model.decode(current_sub_tokens)

        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False):
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        return self.sp_model.decode(token_ids)

    def build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )

        return self.encode(text=prompt)
"""Bilateral filtering of a grayscale image, using OpenCV for I/O."""
import math
import sys

import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # Apply the gaussian function elementwise.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Create a gaussian kernel of the given dimension from pixel distances.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2)
    return vec_gaussian(arr, spatial_variance)
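# A quick hedged sanity check of the spatial kernel: the center entry of a
# Gaussian kernel should carry the largest weight.
#
#     >>> k = get_gauss_kernel(3, 1.0)
#     >>> k.shape == (3, 3) and k[1, 1] == k.max()
#     True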
def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
"""simple docstring"""
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
lowerCAmelCase: Optional[int] =logging.get_logger(__name__)
lowerCAmelCase: Optional[int] ="T5Config"
def __snake_case ( __A ,__A ,__A ) -> jnp.ndarray:
lowercase : Optional[int] = jnp.zeros_like(__A )
lowercase : str = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
lowercase : Tuple = shifted_input_ids.at[:, 0].set(__A )
lowercase : List[Any] = jnp.where(shifted_input_ids == -100 ,__A ,__A )
return shifted_input_ids
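# Hedged sketch of the shift: ids move one slot to the right, the decoder
# start token fills position 0, and -100 label padding becomes the pad id.
#
#     >>> shift_tokens_right(jnp.array([[5, -100, -100]]), pad_token_id=0, decoder_start_token_id=1).tolist()
#     [[1, 5, 0]]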
class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
"""simple docstring"""
def __snake_case ( ) -> Union[str, Any]:
lowercase : str = 0
for i in range(1 ,1001 ):
total += i**i
return str(__A )[-10:]
if __name__ == "__main__":
print(solution())
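# An equivalent hedged sketch (solution_mod is an illustrative name, not part
# of the original): three-argument pow keeps only the last ten digits at each
# step, which bounds the size of every intermediate value.
def solution_mod() -> str:
    modulus = 10**10
    total = sum(pow(i, i, modulus) for i in range(1, 1001)) % modulus
    return str(total).zfill(10)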
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()
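# Hedged sanity sketch of AffineTransformed: rescaling a standard Normal with
# loc=2, scale=3 shifts the mean to 2 and the standard deviation to 3.
#
#     >>> base = Normal(torch.tensor(0.0), torch.tensor(1.0))
#     >>> affine = AffineTransformed(base, loc=2.0, scale=3.0)
#     >>> float(affine.mean), float(affine.stddev)
#     (2.0, 3.0)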
class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    def __init__(self, function) -> None:
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
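# Hedged usage sketch: instantiating the config with defaults and overriding a
# single field.
#
#     >>> config = MegatronBertConfig(num_hidden_layers=2)
#     >>> (config.hidden_size, config.num_hidden_layers)
#     (1024, 2)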
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
def lowerCAmelCase ( self : str):
# prepare image, target and masks_path
__lowerCamelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r') as f:
__lowerCamelCase : Tuple = json.loads(f.read())
__lowerCamelCase : List[Any] = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
__lowerCamelCase : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
# encode them
__lowerCamelCase : List[str] = DetaImageProcessor(format='coco_panoptic')
__lowerCamelCase : Optional[Any] = image_processing(images=SCREAMING_SNAKE_CASE__ ,annotations=SCREAMING_SNAKE_CASE__ ,masks_path=SCREAMING_SNAKE_CASE__ ,return_tensors='pt')
# verify pixel values
__lowerCamelCase : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6])
self.assertEqual(encoding['pixel_values'].shape ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
# verify area
__lowerCamelCase : Optional[Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,SCREAMING_SNAKE_CASE__))
# verify boxes
__lowerCamelCase : Tuple = torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,SCREAMING_SNAKE_CASE__ ,atol=1E-3))
# verify image_id
__lowerCamelCase : int = torch.tensor([3_9_7_6_9])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,SCREAMING_SNAKE_CASE__))
# verify is_crowd
__lowerCamelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,SCREAMING_SNAKE_CASE__))
# verify class_labels
__lowerCamelCase : int = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,SCREAMING_SNAKE_CASE__))
# verify masks
__lowerCamelCase : Optional[Any] = 8_2_2_8_7_3
self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,SCREAMING_SNAKE_CASE__)
# verify orig_size
__lowerCamelCase : Any = torch.tensor([4_8_0, 6_4_0])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,SCREAMING_SNAKE_CASE__))
# verify size
__lowerCamelCase : Any = torch.tensor([8_0_0, 1_0_6_6])
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,SCREAMING_SNAKE_CASE__))
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def lowerCamelCase__ ( _lowerCamelCase : bytes , _lowerCamelCase : int ) -> np.array:
lowerCamelCase_ = F'''{sampling_rate}'''
lowerCamelCase_ = '1'
lowerCamelCase_ = 'f32le'
lowerCamelCase_ = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(_lowerCamelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
lowerCamelCase_ = ffmpeg_process.communicate(_lowerCamelCase )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
lowerCamelCase_ = output_stream[0]
lowerCamelCase_ = np.frombuffer(_lowerCamelCase , np.floataa )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
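# Hedged usage sketch (requires the ffmpeg binary on PATH; the file name is a
# hypothetical example):
#
#     >>> with open("sample.flac", "rb") as f:  # doctest: +SKIP
#     ...     audio = ffmpeg_read(f.read(), sampling_rate=16000)
#     >>> audio.dtype  # doctest: +SKIP
#     dtype('float32')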
def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
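# Hedged sketch of the striding behavior: fixed-size windows are emitted with
# the left/right overlap recorded in the "stride" field, plus a final shorter
# tail chunk.
#
#     >>> out = list(chunk_bytes_iter(iter([b"abcdefgh"]), chunk_len=4, stride=(1, 1)))
#     >>> [c["raw"] for c in out]
#     [b'abcd', b'cdef', b'efgh', b'gh']
#     >>> [c["stride"] for c in out]
#     [(0, 1), (1, 1), (1, 1), (1, 0)]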
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
"""simple docstring"""
import math
def lowerCamelCase__ ( _lowerCamelCase : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_lowerCamelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
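# Quick sketch of the 6k +/- 1 primality test above:
#
#     >>> [n for n in range(2, 20) if is_prime(n)]
#     [2, 3, 5, 7, 11, 13, 17, 19]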
def solution(ratio: float = 0.1) -> int:
    """
    Returns the side length of the square spiral for which the ratio of primes
    along both diagonals first falls below `ratio`.
    """
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from typing import List, Optional, Tuple, Union

import PIL
import torch
from torchvision import transforms

from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor


trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        init_latents = image

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
@torch.no_grad()
def __call__( self ,UpperCamelCase = None ,UpperCamelCase = 0.8 ,UpperCamelCase = 1 ,UpperCamelCase = None ,UpperCamelCase = 0.0 ,UpperCamelCase = 50 ,UpperCamelCase = None ,UpperCamelCase = "pil" ,UpperCamelCase = True ,) -> Union[ImagePipelineOutput, Tuple]:
self.check_inputs(UpperCamelCase )
# 2. Preprocess image
snake_case__ :List[Any] = preprocess(UpperCamelCase )
# 3. set timesteps
self.scheduler.set_timesteps(UpperCamelCase ,device=self.device )
snake_case__ , snake_case__ :int = self.get_timesteps(UpperCamelCase ,UpperCamelCase ,self.device )
snake_case__ :str = timesteps[:1].repeat(UpperCamelCase )
# 4. Prepare latent variables
snake_case__ :Any = self.prepare_latents(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,self.unet.dtype ,self.device ,UpperCamelCase )
snake_case__ :List[Any] = latents
# 5. Denoising loop
for t in self.progress_bar(UpperCamelCase ):
# 1. predict noise model_output
snake_case__ :Optional[Any] = self.unet(UpperCamelCase ,UpperCamelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
snake_case__ :str = self.scheduler.step(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,eta=UpperCamelCase ,use_clipped_model_output=UpperCamelCase ,generator=UpperCamelCase ,).prev_sample
snake_case__ :int = (image / 2 + 0.5).clamp(0 ,1 )
snake_case__ :Dict = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
snake_case__ :Any = self.numpy_to_pil(UpperCamelCase )
if not return_dict:
return (image, latent_timestep.item())
        return ImagePipelineOutput(images=UpperCamelCase )
| 241 |
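
# The snippet above resembles a DDIM image-to-image pipeline: noise the input
# at a strength-dependent timestep, then denoise over only the tail of the
# schedule. Its timestep-window arithmetic reduces to this (my own names):
def get_timestep_window(num_inference_steps: int, strength: float) -> tuple[int, int]:
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return t_start, num_inference_steps - t_start

assert get_timestep_window(50, 0.8) == (10, 40)  # strength 0.8 runs the last 40 of 50 steps
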
from collections.abc import Generator
from math import sin
def lowercase_ ( __snake_case : bytes ) -> bytes:
'''simple docstring'''
if len(__snake_case ) != 32:
raise ValueError("Input must be of length 32" )
snake_case__ :Any = b""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def lowercase_ ( __snake_case : int ) -> bytes:
'''simple docstring'''
if i < 0:
raise ValueError("Input must be non-negative" )
snake_case__ :List[str] = format(__snake_case , "08x" )[-8:]
snake_case__ :List[str] = b""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def lowercase_ ( __snake_case : bytes ) -> bytes:
'''simple docstring'''
snake_case__ :Dict = b""
for char in message:
bit_string += format(__snake_case , "08b" ).encode("utf-8" )
snake_case__ :str = format(len(__snake_case ) , "064b" ).encode("utf-8" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__snake_case ) % 5_12 != 4_48:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def lowercase_ ( __snake_case : bytes ) -> Generator[list[int], None, None]:
'''simple docstring'''
if len(__snake_case ) % 5_12 != 0:
raise ValueError("Input must have length that's a multiple of 512" )
for pos in range(0 , len(__snake_case ) , 5_12 ):
snake_case__ :Tuple = bit_string[pos : pos + 5_12]
snake_case__ :List[Any] = []
for i in range(0 , 5_12 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def lowercase_ ( __snake_case : int ) -> int:
'''simple docstring'''
if i < 0:
raise ValueError("Input must be non-negative" )
snake_case__ :Union[str, Any] = format(__snake_case , "032b" )
snake_case__ :Union[str, Any] = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__snake_case , 2 )
def lowercase_ ( __snake_case : int , __snake_case : int ) -> int:
'''simple docstring'''
return (a + b) % 2**32
def lowercase_ ( __snake_case : int , __snake_case : int ) -> int:
'''simple docstring'''
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def lowercase_ ( __snake_case : bytes ) -> bytes:
'''simple docstring'''
snake_case__ :Tuple = preprocess(__snake_case )
snake_case__ :int = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
snake_case__ :Tuple = 0X67_452_301
snake_case__ :str = 0Xef_cda_b89
snake_case__ :Dict = 0X98_bad_cfe
snake_case__ :List[str] = 0X10_325_476
    # Per-round rotation amounts: four MD5 rounds, 16 steps each
    snake_case__ :str = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__snake_case ):
snake_case__ :List[Any] = aa
snake_case__ :List[str] = ba
snake_case__ :List[Any] = ca
snake_case__ :Tuple = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
snake_case__ :Dict = d ^ (b & (c ^ d))
snake_case__ :List[str] = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
snake_case__ :str = c ^ (d & (b ^ c))
snake_case__ :Optional[int] = (5 * i + 1) % 16
elif i <= 47:
snake_case__ :Union[str, Any] = b ^ c ^ d
snake_case__ :Union[str, Any] = (3 * i + 5) % 16
else:
snake_case__ :List[str] = c ^ (b | not_aa(__snake_case ))
snake_case__ :Dict = (7 * i) % 16
snake_case__ :Union[str, Any] = (f + a + added_consts[i] + block_words[g]) % 2**32
snake_case__ :Optional[Any] = d
snake_case__ :List[str] = c
snake_case__ :Union[str, Any] = b
snake_case__ :Dict = sum_aa(__snake_case , left_rotate_aa(__snake_case , shift_amounts[i] ) )
# Add hashed chunk to running total
snake_case__ :Any = sum_aa(__snake_case , __snake_case )
snake_case__ :List[Any] = sum_aa(__snake_case , __snake_case )
snake_case__ :Any = sum_aa(__snake_case , __snake_case )
snake_case__ :int = sum_aa(__snake_case , __snake_case )
snake_case__ :Optional[Any] = reformat_hex(__snake_case ) + reformat_hex(__snake_case ) + reformat_hex(__snake_case ) + reformat_hex(__snake_case )
return digest
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 241 | 1 |
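
# The snippet above is recognizably a from-scratch MD5: sine-derived
# constants, a per-round shift table, and little-endian word packing. A quick
# standalone sanity sketch of the 32-bit left-rotate primitive it relies on:
def left_rotate_32(i: int, shift: int) -> int:
    # rotate a 32-bit unsigned integer left by `shift` bits
    return ((i << shift) | (i >> (32 - shift))) & 0xFFFFFFFF

assert left_rotate_32(0x80000000, 1) == 1
assert left_rotate_32(0x12345678, 8) == 0x34567812
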
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase: List[str] = logging.get_logger(__name__)
_lowercase: List[str] = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class lowerCamelCase__ ( _UpperCamelCase ):
UpperCamelCase__ ="transfo-xl"
UpperCamelCase__ =["mems"]
UpperCamelCase__ ={
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Any , lowercase__ : Optional[Any]=26_77_35 , lowercase__ : Dict=[2_00_00, 4_00_00, 20_00_00] , lowercase__ : Union[str, Any]=10_24 , lowercase__ : Optional[Any]=10_24 , lowercase__ : Union[str, Any]=16 , lowercase__ : Tuple=64 , lowercase__ : Optional[int]=40_96 , lowercase__ : List[str]=4 , lowercase__ : Union[str, Any]=False , lowercase__ : Tuple=18 , lowercase__ : Optional[int]=16_00 , lowercase__ : List[str]=10_00 , lowercase__ : Union[str, Any]=True , lowercase__ : Tuple=True , lowercase__ : Dict=0 , lowercase__ : str=-1 , lowercase__ : Dict=True , lowercase__ : List[Any]=0.1 , lowercase__ : List[Any]=0.0 , lowercase__ : List[str]=True , lowercase__ : str="normal" , lowercase__ : Tuple=0.0_1 , lowercase__ : int=0.0_1 , lowercase__ : Optional[int]=0.0_2 , lowercase__ : List[Any]=1e-5 , lowercase__ : int=0 , **lowercase__ : List[str] , ):
_lowerCAmelCase = vocab_size
_lowerCAmelCase = []
self.cutoffs.extend(lowercase__ )
if proj_share_all_but_first:
_lowerCAmelCase = [False] + [True] * len(self.cutoffs )
else:
_lowerCAmelCase = [False] + [False] * len(self.cutoffs )
_lowerCAmelCase = d_model
_lowerCAmelCase = d_embed
_lowerCAmelCase = d_head
_lowerCAmelCase = d_inner
_lowerCAmelCase = div_val
_lowerCAmelCase = pre_lnorm
_lowerCAmelCase = n_layer
_lowerCAmelCase = n_head
_lowerCAmelCase = mem_len
_lowerCAmelCase = same_length
_lowerCAmelCase = attn_type
_lowerCAmelCase = clamp_len
_lowerCAmelCase = sample_softmax
_lowerCAmelCase = adaptive
_lowerCAmelCase = dropout
_lowerCAmelCase = dropatt
_lowerCAmelCase = untie_r
_lowerCAmelCase = init
_lowerCAmelCase = init_range
_lowerCAmelCase = proj_init_std
_lowerCAmelCase = init_std
_lowerCAmelCase = layer_norm_epsilon
super().__init__(eos_token_id=lowercase__ , **lowercase__ )
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
# Message copied from Transformer-XL documentation
logger.info(f'The model {self.model_type} is one of the few models that has no sequence length limit.' )
return -1
@max_position_embeddings.setter
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , lowercase__ : Optional[Any] ):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'The model {self.model_type} is one of the few models that has no sequence length limit.' )
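
# The snippet above is a Transformer-XL style config; its distinctive piece is
# the attribute map that aliases generic names ("hidden_size") onto
# model-specific fields ("d_model"). A minimal standalone sketch of that
# aliasing mechanism (not the transformers implementation, just the idea):
class AliasedConfig:
    attribute_map = {"hidden_size": "d_model"}

    def __init__(self, d_model: int = 1024):
        self.d_model = d_model

    def __getattr__(self, name):
        # only called when normal lookup fails, so aliases resolve lazily
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)

assert AliasedConfig(d_model=512).hidden_size == 512
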
| 715 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase__ ( UpperCAmelCase ):
UpperCamelCase__ =(UniPCMultistepScheduler,)
UpperCamelCase__ =(("num_inference_steps", 2_5),)
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , **lowercase__ : Dict ):
_lowerCAmelCase = {
'num_train_timesteps': 10_00,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'solver_order': 2,
'solver_type': 'bh2',
}
config.update(**lowercase__ )
return config
def SCREAMING_SNAKE_CASE__ ( self : Any , lowercase__ : List[str]=0 , **lowercase__ : Union[str, Any] ):
_lowerCAmelCase = dict(self.forward_default_kwargs )
_lowerCAmelCase = kwargs.pop('num_inference_steps' , lowercase__ )
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
_lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.get_scheduler_config(**lowercase__ )
_lowerCAmelCase = scheduler_class(**lowercase__ )
scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals
_lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase__ )
_lowerCAmelCase = scheduler_class.from_pretrained(lowercase__ )
new_scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals
_lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase , _lowerCAmelCase = sample, sample
for t in range(lowercase__ , time_step + scheduler.config.solver_order + 1 ):
_lowerCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
_lowerCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , lowercase__ : Tuple=0 , **lowercase__ : List[Any] ):
_lowerCAmelCase = dict(self.forward_default_kwargs )
_lowerCAmelCase = kwargs.pop('num_inference_steps' , lowercase__ )
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
_lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**lowercase__ )
scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase__ )
_lowerCAmelCase = scheduler_class.from_pretrained(lowercase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase__ )
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
_lowerCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE__ ( self : Tuple , lowercase__ : int=None , **lowercase__ : List[Any] ):
if scheduler is None:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(**lowercase__ )
_lowerCAmelCase = scheduler_class(**lowercase__ )
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(**lowercase__ )
_lowerCAmelCase = scheduler_class(**lowercase__ )
_lowerCAmelCase = 10
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(lowercase__ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase = model(lowercase__ , lowercase__ )
_lowerCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ ).prev_sample
return sample
def SCREAMING_SNAKE_CASE__ ( self : str ):
_lowerCAmelCase = dict(self.forward_default_kwargs )
_lowerCAmelCase = kwargs.pop('num_inference_steps' , lowercase__ )
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**lowercase__ )
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase__ , 'set_timesteps' ):
scheduler.set_timesteps(lowercase__ )
elif num_inference_steps is not None and not hasattr(lowercase__ , 'set_timesteps' ):
_lowerCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
_lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
_lowerCAmelCase = scheduler.timesteps[5]
_lowerCAmelCase = scheduler.timesteps[6]
_lowerCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
_lowerCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE__ ( self : str ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_lowerCAmelCase = UniPCMultistepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase = self.full_loop(scheduler=lowercase__ )
_lowerCAmelCase = torch.mean(torch.abs(lowercase__ ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
_lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase = self.full_loop(scheduler=lowercase__ )
_lowerCAmelCase = torch.mean(torch.abs(lowercase__ ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
self.check_over_configs(thresholding=lowercase__ )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowercase__ , prediction_type=lowercase__ , sample_max_value=lowercase__ , solver_order=lowercase__ , solver_type=lowercase__ , )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowercase__ , solver_type=lowercase__ , prediction_type=lowercase__ , )
_lowerCAmelCase = self.full_loop(
solver_order=lowercase__ , solver_type=lowercase__ , prediction_type=lowercase__ , )
assert not torch.isnan(lowercase__ ).any(), "Samples have nan numbers"
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
self.check_over_configs(lower_order_final=lowercase__ )
self.check_over_configs(lower_order_final=lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=lowercase__ , time_step=0 )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
_lowerCAmelCase = self.full_loop()
_lowerCAmelCase = torch.mean(torch.abs(lowercase__ ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : int ):
_lowerCAmelCase = self.full_loop(prediction_type='v_prediction' )
_lowerCAmelCase = torch.mean(torch.abs(lowercase__ ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : str ):
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(thresholding=lowercase__ , dynamic_thresholding_ratio=0 )
_lowerCAmelCase = scheduler_class(**lowercase__ )
_lowerCAmelCase = 10
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowercase__ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase = model(lowercase__ , lowercase__ )
_lowerCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ ).prev_sample
assert sample.dtype == torch.floataa
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , **lowercase__ : List[Any] ):
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.get_scheduler_config(**lowercase__ )
_lowerCAmelCase = scheduler_class(**lowercase__ )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 225 | 0 |
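
# The snippet above is a scheduler test suite whose core pattern is a config
# round-trip: save the scheduler config, reload it, and assert both instances
# produce identical steps. The skeleton of that pattern (hypothetical names):
#
#   with tempfile.TemporaryDirectory() as tmpdirname:
#       scheduler.save_config(tmpdirname)
#       new_scheduler = SchedulerClass.from_pretrained(tmpdirname)
#   out_a = scheduler.step(residual, t, sample).prev_sample
#   out_b = new_scheduler.step(residual, t, sample).prev_sample
#   assert torch.sum(torch.abs(out_a - out_b)) < 1e-5
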
from __future__ import annotations
_A : int = """#"""
class __snake_case :
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = {}
def lowercase_ ( self , A_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self._trie
for char in text:
if char not in trie:
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = trie[char]
SCREAMING_SNAKE_CASE__ = True
def lowercase_ ( self , A_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self._trie
for char in prefix:
if char in trie:
SCREAMING_SNAKE_CASE__ = trie[char]
else:
return []
return self._elements(A_ )
def lowercase_ ( self , A_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = []
for c, v in d.items():
SCREAMING_SNAKE_CASE__ = [''' '''] if c == END else [(c + s) for s in self._elements(A_ )]
result.extend(A_ )
return tuple(A_ )
_A : Any = Trie()
_A : Tuple = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
trie.insert_word(word)
def __snake_case ( lowerCAmelCase_ ) -> tuple:
SCREAMING_SNAKE_CASE__ = trie.find_word(lowerCAmelCase_ )
return tuple(string + word for word in suffixes )
def __snake_case ( ) -> None:
print(autocomplete_using_trie('''de''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 100 |
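
# The snippet above is a character trie with "#" as the end-of-word sentinel
# and autocomplete via depth-first collection. The same idea in a compact,
# runnable form (function names are mine):
def build_trie(words: list[str]) -> dict:
    root: dict = {}
    for word in words:
        node = root
        for ch in word:
            node = node.setdefault(ch, {})
        node["#"] = True  # end-of-word marker
    return root

def complete(root: dict, prefix: str) -> list[str]:
    node = root
    for ch in prefix:
        if ch not in node:
            return []
        node = node[ch]
    out: list[str] = []

    def walk(n: dict, acc: str) -> None:
        for k, v in n.items():
            if k == "#":
                out.append(prefix + acc)
            else:
                walk(v, acc + k)

    walk(node, "")
    return out

assert sorted(complete(build_trie(["dog", "deer", "deal"]), "de")) == ["deal", "deer"]
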
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: int , _UpperCAmelCase: Any , _UpperCAmelCase: Tuple=13 , _UpperCAmelCase: Optional[Any]=32 , _UpperCAmelCase: List[Any]=2 , _UpperCAmelCase: Optional[int]=3 , _UpperCAmelCase: Optional[int]=16 , _UpperCAmelCase: Optional[Any]=[32, 64, 128] , _UpperCAmelCase: Optional[int]=[1, 2, 1] , _UpperCAmelCase: int=[2, 2, 4] , _UpperCAmelCase: List[str]=2 , _UpperCAmelCase: Dict=2.0 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: str=0.0 , _UpperCAmelCase: int=0.0 , _UpperCAmelCase: str=0.1 , _UpperCAmelCase: Dict="gelu" , _UpperCAmelCase: Optional[Any]=False , _UpperCAmelCase: Union[str, Any]=True , _UpperCAmelCase: Union[str, Any]=0.0_2 , _UpperCAmelCase: Optional[int]=1e-5 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: Optional[Any]=None , _UpperCAmelCase: Tuple=True , _UpperCAmelCase: str=10 , _UpperCAmelCase: int=8 , _UpperCAmelCase: List[Any]=["stage1", "stage2"] , _UpperCAmelCase: List[Any]=[1, 2] , ):
_lowerCAmelCase :Optional[int] = parent
_lowerCAmelCase :Dict = batch_size
_lowerCAmelCase :Optional[Any] = image_size
_lowerCAmelCase :Optional[Any] = patch_size
_lowerCAmelCase :List[Any] = num_channels
_lowerCAmelCase :Optional[int] = embed_dim
_lowerCAmelCase :List[str] = hidden_sizes
_lowerCAmelCase :Union[str, Any] = depths
_lowerCAmelCase :int = num_heads
_lowerCAmelCase :Any = window_size
_lowerCAmelCase :List[Any] = mlp_ratio
_lowerCAmelCase :Optional[int] = qkv_bias
_lowerCAmelCase :Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase :Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase :Dict = drop_path_rate
_lowerCAmelCase :List[Any] = hidden_act
_lowerCAmelCase :Tuple = use_absolute_embeddings
_lowerCAmelCase :Optional[int] = patch_norm
_lowerCAmelCase :Optional[Any] = layer_norm_eps
_lowerCAmelCase :Union[str, Any] = initializer_range
_lowerCAmelCase :List[str] = is_training
_lowerCAmelCase :str = scope
_lowerCAmelCase :Optional[int] = use_labels
_lowerCAmelCase :List[Any] = type_sequence_label_size
_lowerCAmelCase :Union[str, Any] = encoder_stride
_lowerCAmelCase :Optional[int] = out_features
_lowerCAmelCase :List[str] = out_indices
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase :Dict = None
if self.use_labels:
_lowerCAmelCase :List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase :str = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self: int ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple ):
_lowerCAmelCase :List[Any] = FocalNetModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :List[str] = model(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCAmelCase :List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :Union[str, Any] = FocalNetBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCAmelCase :Optional[int] = None
_lowerCAmelCase :Dict = FocalNetBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Any = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :Any = FocalNetForMaskedImageModeling(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCAmelCase :List[Any] = 1
_lowerCAmelCase :List[Any] = FocalNetForMaskedImageModeling(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase :int = model(_UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int , _UpperCAmelCase: Dict , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Union[str, Any] = self.type_sequence_label_size
_lowerCAmelCase :Dict = FocalNetForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase :Optional[int] = 1
_lowerCAmelCase :Tuple = FocalNetForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase :List[str] = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = config_and_inputs
_lowerCAmelCase :List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ (snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase : Optional[Any] = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : Tuple = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Any = False
lowerCamelCase : List[Any] = False
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = FocalNetModelTester(self )
_lowerCAmelCase :str = ConfigTester(self , config_class=_UpperCAmelCase , embed_dim=37 , has_text_modality=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
return
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def SCREAMING_SNAKE_CASE__ ( self: str ):
pass
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Optional[Any] = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase :Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase , _lowerCAmelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Tuple = model_class(_UpperCAmelCase )
_lowerCAmelCase :Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase :int = [*signature.parameters.keys()]
_lowerCAmelCase :List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: int , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_lowerCAmelCase :Optional[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
_lowerCAmelCase :List[Any] = outputs.hidden_states
_lowerCAmelCase :str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# FocalNet has a different seq_length
_lowerCAmelCase :Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase :List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_lowerCAmelCase :List[str] = outputs.reshaped_hidden_states
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :int = reshaped_hidden_states[0].shape
_lowerCAmelCase :Optional[int] = (
reshaped_hidden_states[0].view(_UpperCAmelCase , _UpperCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Optional[int] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase :Dict = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase , _lowerCAmelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :str = 3
_lowerCAmelCase :Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCAmelCase :int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCAmelCase :Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :List[str] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase :Union[str, Any] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
@slow
def SCREAMING_SNAKE_CASE__ ( self: int ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase :List[Any] = FocalNetModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase , _lowerCAmelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :Optional[int] = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
_lowerCAmelCase :str = model_class(config=_UpperCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Tuple = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = self.default_image_processor
_lowerCAmelCase :Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCAmelCase :Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_lowerCAmelCase :Dict = model(**_UpperCAmelCase )
# verify the logits
_lowerCAmelCase :str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
_lowerCAmelCase :Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : int = (FocalNetBackbone,) if is_torch_available() else ()
lowerCamelCase : str = FocalNetConfig
lowerCamelCase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
        _lowerCAmelCase :Any = FocalNetModelTester(self )
| 687 | 0 |
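
# The snippet above tests a hierarchical vision backbone; the recurring shape
# arithmetic is: stage-0 sequence length is (image_size / patch_size) ** 2,
# the final stage divides it by 4 per downsampling step, and the hidden dim
# doubles per step. With the tester defaults above:
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]
seq_len_stage0 = (image_size // patch_size) ** 2
final_seq_len = seq_len_stage0 // (4 ** (len(depths) - 1))
final_dim = embed_dim * 2 ** (len(depths) - 1)
assert (seq_len_stage0, final_seq_len, final_dim) == (256, 16, 64)
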
def __a ( __UpperCAmelCase : int = 10**12 ) -> int:
"""simple docstring"""
lowerCamelCase_ : str = 1
lowerCamelCase_ : str = 0
lowerCamelCase_ : Any = 1
lowerCamelCase_ : Optional[int] = 1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(f"{solution() = }")
| 712 |
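
# The snippet above matches Project Euler 100 (arranged probability): iterate
# a Pell-style recurrence until the total disc count first exceeds min_total,
# then recover the blue-disc count as (denominator + 1) // 2. The known small
# solutions satisfy P(two blue) = 1/2 exactly:
assert 15 * 14 * 2 == 21 * 20  # 15 blue of 21 discs
assert 85 * 84 * 2 == 120 * 119  # next solution: 85 blue of 120 discs
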
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Any = logging.get_logger(__name__)
snake_case_ : Optional[int] = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class snake_case_ ( __A ):
'''simple docstring'''
lowerCamelCase = "gpt_bigcode"
lowerCamelCase = ["past_key_values"]
lowerCamelCase = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : List[str] , __magic_name__ : List[str]=5_0257 , __magic_name__ : Optional[int]=1024 , __magic_name__ : Dict=768 , __magic_name__ : str=12 , __magic_name__ : Union[str, Any]=12 , __magic_name__ : Tuple=None , __magic_name__ : List[str]="gelu_pytorch_tanh" , __magic_name__ : int=0.1 , __magic_name__ : int=0.1 , __magic_name__ : Optional[Any]=0.1 , __magic_name__ : Union[str, Any]=1e-5 , __magic_name__ : List[str]=0.02 , __magic_name__ : Tuple=True , __magic_name__ : List[Any]=True , __magic_name__ : Tuple=5_0256 , __magic_name__ : str=5_0256 , __magic_name__ : List[Any]=True , __magic_name__ : Optional[Any]=True , __magic_name__ : Optional[Any]=True , **__magic_name__ : int , ) -> Optional[Any]:
lowerCamelCase_ : Dict = vocab_size
lowerCamelCase_ : Union[str, Any] = n_positions
lowerCamelCase_ : str = n_embd
lowerCamelCase_ : Tuple = n_layer
lowerCamelCase_ : int = n_head
lowerCamelCase_ : Union[str, Any] = n_inner
lowerCamelCase_ : int = activation_function
lowerCamelCase_ : Union[str, Any] = resid_pdrop
lowerCamelCase_ : Optional[Any] = embd_pdrop
lowerCamelCase_ : Dict = attn_pdrop
lowerCamelCase_ : Union[str, Any] = layer_norm_epsilon
lowerCamelCase_ : Any = initializer_range
lowerCamelCase_ : Optional[int] = scale_attn_weights
lowerCamelCase_ : Any = use_cache
lowerCamelCase_ : Dict = attention_softmax_in_fpaa
lowerCamelCase_ : Any = scale_attention_softmax_in_fpaa
lowerCamelCase_ : Tuple = multi_query
lowerCamelCase_ : Tuple = bos_token_id
lowerCamelCase_ : int = eos_token_id
super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
| 253 | 0 |
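
# The snippet above is a GPT-BigCode style config; the distinctive flag is
# multi_query, i.e. a single shared key/value head instead of one per
# attention head. The per-token KV-cache saving is easy to quantify:
n_head, head_dim = 12, 64
kv_multi_head = 2 * n_head * head_dim  # keys + values for every head
kv_multi_query = 2 * 1 * head_dim  # one shared key/value head
assert kv_multi_head // kv_multi_query == n_head  # 12x smaller cache here
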
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class lowercase_ (unittest.TestCase ):
def __a ( self : Tuple ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
SCREAMING_SNAKE_CASE_ = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=a_ , cache_dir=a_ )
SCREAMING_SNAKE_CASE_ = [t[-1] for t in os.walk(os.path.join(a_ , os.listdir(a_ )[0] , 'snapshots' ) )]
SCREAMING_SNAKE_CASE_ = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class lowercase_ (unittest.TestCase ):
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=a_ )
SCREAMING_SNAKE_CASE_ = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE_ = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = jax.device_count()
SCREAMING_SNAKE_CASE_ = num_samples * [prompt]
SCREAMING_SNAKE_CASE_ = pipeline.prepare_inputs(a_ )
# shard inputs and rng
SCREAMING_SNAKE_CASE_ = replicate(a_ )
SCREAMING_SNAKE_CASE_ = jax.random.split(a_ , a_ )
SCREAMING_SNAKE_CASE_ = shard(a_ )
SCREAMING_SNAKE_CASE_ = pipeline(a_ , a_ , a_ , a_ , jit=a_ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1e-3
assert np.abs(np.abs(a_ , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5e-1
SCREAMING_SNAKE_CASE_ = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(a_ ) == num_samples
def __a ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=a_ )
SCREAMING_SNAKE_CASE_ = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE_ = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_ = 50
SCREAMING_SNAKE_CASE_ = jax.device_count()
SCREAMING_SNAKE_CASE_ = num_samples * [prompt]
SCREAMING_SNAKE_CASE_ = pipeline.prepare_inputs(a_ )
# shard inputs and rng
SCREAMING_SNAKE_CASE_ = replicate(a_ )
SCREAMING_SNAKE_CASE_ = jax.random.split(a_ , a_ )
SCREAMING_SNAKE_CASE_ = shard(a_ )
SCREAMING_SNAKE_CASE_ = pipeline(a_ , a_ , a_ , a_ , jit=a_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1e-3
assert np.abs((np.abs(a_ , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5e-1
def __a ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=a_ )
SCREAMING_SNAKE_CASE_ = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE_ = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_ = 50
SCREAMING_SNAKE_CASE_ = jax.device_count()
SCREAMING_SNAKE_CASE_ = num_samples * [prompt]
SCREAMING_SNAKE_CASE_ = pipeline.prepare_inputs(a_ )
# shard inputs and rng
SCREAMING_SNAKE_CASE_ = replicate(a_ )
SCREAMING_SNAKE_CASE_ = jax.random.split(a_ , a_ )
SCREAMING_SNAKE_CASE_ = shard(a_ )
SCREAMING_SNAKE_CASE_ = pipeline(a_ , a_ , a_ , a_ , jit=a_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1e-3
assert np.abs((np.abs(a_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5e-1
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE_ = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE_ = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_ = 50
SCREAMING_SNAKE_CASE_ = jax.device_count()
SCREAMING_SNAKE_CASE_ = num_samples * [prompt]
SCREAMING_SNAKE_CASE_ = pipeline.prepare_inputs(a_ )
# shard inputs and rng
SCREAMING_SNAKE_CASE_ = replicate(a_ )
SCREAMING_SNAKE_CASE_ = jax.random.split(a_ , a_ )
SCREAMING_SNAKE_CASE_ = shard(a_ )
SCREAMING_SNAKE_CASE_ = pipeline(a_ , a_ , a_ , a_ , jit=a_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1e-3
assert np.abs((np.abs(a_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5e-1
def __a ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , set_alpha_to_one=a_ , steps_offset=1 , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=a_ , safety_checker=a_ , )
SCREAMING_SNAKE_CASE_ = scheduler.create_state()
SCREAMING_SNAKE_CASE_ = scheduler_state
SCREAMING_SNAKE_CASE_ = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE_ = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_ = 50
SCREAMING_SNAKE_CASE_ = jax.device_count()
SCREAMING_SNAKE_CASE_ = num_samples * [prompt]
SCREAMING_SNAKE_CASE_ = pipeline.prepare_inputs(a_ )
# shard inputs and rng
SCREAMING_SNAKE_CASE_ = replicate(a_ )
SCREAMING_SNAKE_CASE_ = jax.random.split(a_ , a_ )
SCREAMING_SNAKE_CASE_ = shard(a_ )
SCREAMING_SNAKE_CASE_ = pipeline(a_ , a_ , a_ , a_ , jit=a_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1e-3
assert np.abs((np.abs(a_ , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5e-1
def __a ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE_ = jax.device_count()
SCREAMING_SNAKE_CASE_ = num_samples * [prompt]
SCREAMING_SNAKE_CASE_ = jax.random.split(jax.random.PRNGKey(0 ) , a_ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=a_ , )
SCREAMING_SNAKE_CASE_ = replicate(a_ )
SCREAMING_SNAKE_CASE_ = pipeline.prepare_inputs(a_ )
SCREAMING_SNAKE_CASE_ = shard(a_ )
SCREAMING_SNAKE_CASE_ = pipeline(a_ , a_ , a_ , jit=a_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = images[2, 0, 2_56, 10:17, 1]
# With memory efficient attention
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=a_ , use_memory_efficient_attention=a_ , )
SCREAMING_SNAKE_CASE_ = replicate(a_ )
SCREAMING_SNAKE_CASE_ = pipeline.prepare_inputs(a_ )
SCREAMING_SNAKE_CASE_ = shard(a_ )
SCREAMING_SNAKE_CASE_ = pipeline(a_ , a_ , a_ , jit=a_ ).images
assert images_eff.shape == (num_samples, 1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = images[2, 0, 2_56, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice ).max() < 1e-2
| 360 |
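
# The snippet above benchmarks Flax Stable Diffusion with pmap-style data
# parallelism: replicate the params across devices, split the PRNG key per
# device, and shard the prompt batch. The bookkeeping reduces to this
# (JAX sketch with assumed names, mirroring the calls above):
#
#   params = replicate(params)  # copy weights to every device
#   rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
#   prompt_ids = shard(prompt_ids)  # leading dim -> (devices, per_device_batch)
#   images = pipeline(prompt_ids, params, rng, jit=True).images
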
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : str = logging.get_logger(__name__)
_a : Dict = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowercase_ ( a ):
'''simple docstring'''
__lowerCAmelCase : List[Any] = "cvt"
def __init__( self , a_=3 , a_=[7, 3, 3] , a_=[4, 2, 2] , a_=[2, 1, 1] , a_=[6_4, 1_9_2, 3_8_4] , a_=[1, 3, 6] , a_=[1, 2, 1_0] , a_=[4.0, 4.0, 4.0] , a_=[0.0, 0.0, 0.0] , a_=[0.0, 0.0, 0.0] , a_=[0.0, 0.0, 0.1] , a_=[True, True, True] , a_=[False, False, True] , a_=["dw_bn", "dw_bn", "dw_bn"] , a_=[3, 3, 3] , a_=[1, 1, 1] , a_=[2, 2, 2] , a_=[1, 1, 1] , a_=[1, 1, 1] , a_=0.02 , a_=1E-12 , **a_ , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**a_ )
UpperCAmelCase = num_channels
UpperCAmelCase = patch_sizes
UpperCAmelCase = patch_stride
UpperCAmelCase = patch_padding
UpperCAmelCase = embed_dim
UpperCAmelCase = num_heads
UpperCAmelCase = depth
UpperCAmelCase = mlp_ratio
UpperCAmelCase = attention_drop_rate
UpperCAmelCase = drop_rate
UpperCAmelCase = drop_path_rate
UpperCAmelCase = qkv_bias
UpperCAmelCase = cls_token
UpperCAmelCase = qkv_projection_method
UpperCAmelCase = kernel_qkv
UpperCAmelCase = padding_kv
UpperCAmelCase = stride_kv
UpperCAmelCase = padding_q
UpperCAmelCase = stride_q
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
| 447 | 0 |
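
# The snippet above is a convolutional ViT (CvT) config built from per-stage
# lists: index i of patch_sizes / embed_dim / num_heads / depth describes
# stage i, so the lists must stay the same length. With the defaults above:
patch_sizes, embed_dim, num_heads, depth = [7, 3, 3], [64, 192, 384], [1, 3, 6], [1, 2, 10]
assert len(patch_sizes) == len(embed_dim) == len(num_heads) == len(depth) == 3
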
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return x + 2
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = "x = 3"
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = evaluate(a , {} , state=a )
assert result == 3
self.assertDictEqual(a , {"""x""": 3} )
SCREAMING_SNAKE_CASE = "x = y"
SCREAMING_SNAKE_CASE = {"y": 5}
SCREAMING_SNAKE_CASE = evaluate(a , {} , state=a )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(a , {"""x""": 5, """y""": 5} )
def _UpperCAmelCase ( self : int ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = "y = add_two(x)"
SCREAMING_SNAKE_CASE = {"x": 3}
SCREAMING_SNAKE_CASE = evaluate(a , {"""add_two""": add_two} , state=a )
assert result == 5
self.assertDictEqual(a , {"""x""": 3, """y""": 5} )
# Won't work without the tool
with CaptureStdout() as out:
SCREAMING_SNAKE_CASE = evaluate(a , {} , state=a )
assert result is None
assert "tried to execute add_two" in out.out
def _UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE = "x = 3"
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = evaluate(a , {} , state=a )
assert result == 3
self.assertDictEqual(a , {"""x""": 3} )
def _UpperCAmelCase ( self : List[str] ) -> Tuple:
SCREAMING_SNAKE_CASE = "test_dict = {'x': x, 'y': add_two(x)}"
SCREAMING_SNAKE_CASE = {"x": 3}
SCREAMING_SNAKE_CASE = evaluate(a , {"""add_two""": add_two} , state=a )
self.assertDictEqual(a , {"""x""": 3, """y""": 5} )
self.assertDictEqual(a , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def _UpperCAmelCase ( self : Dict ) -> int:
SCREAMING_SNAKE_CASE = "x = 3\ny = 5"
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = evaluate(a , {} , state=a )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(a , {"""x""": 3, """y""": 5} )
def _UpperCAmelCase ( self : str ) -> Dict:
SCREAMING_SNAKE_CASE = "text = f'This is x: {x}.'"
SCREAMING_SNAKE_CASE = {"x": 3}
SCREAMING_SNAKE_CASE = evaluate(a , {} , state=a )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(a , {"""x""": 3, """text""": """This is x: 3."""} )
def _UpperCAmelCase ( self : Dict ) -> int:
SCREAMING_SNAKE_CASE = "if x <= 3:\n y = 2\nelse:\n y = 5"
SCREAMING_SNAKE_CASE = {"x": 3}
SCREAMING_SNAKE_CASE = evaluate(a , {} , state=a )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(a , {"""x""": 3, """y""": 2} )
SCREAMING_SNAKE_CASE = {"x": 8}
SCREAMING_SNAKE_CASE = evaluate(a , {} , state=a )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(a , {"""x""": 8, """y""": 5} )
def _UpperCAmelCase ( self : Any ) -> Dict:
SCREAMING_SNAKE_CASE = "test_list = [x, add_two(x)]"
SCREAMING_SNAKE_CASE = {"x": 3}
SCREAMING_SNAKE_CASE = evaluate(a , {"""add_two""": add_two} , state=a )
self.assertListEqual(a , [3, 5] )
self.assertDictEqual(a , {"""x""": 3, """test_list""": [3, 5]} )
def _UpperCAmelCase ( self : int ) -> Tuple:
SCREAMING_SNAKE_CASE = "y = x"
SCREAMING_SNAKE_CASE = {"x": 3}
SCREAMING_SNAKE_CASE = evaluate(a , {} , state=a )
assert result == 3
self.assertDictEqual(a , {"""x""": 3, """y""": 3} )
def _UpperCAmelCase ( self : Optional[Any] ) -> Dict:
SCREAMING_SNAKE_CASE = "test_list = [x, add_two(x)]\ntest_list[1]"
SCREAMING_SNAKE_CASE = {"x": 3}
SCREAMING_SNAKE_CASE = evaluate(a , {"""add_two""": add_two} , state=a )
assert result == 5
self.assertDictEqual(a , {"""x""": 3, """test_list""": [3, 5]} )
SCREAMING_SNAKE_CASE = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
SCREAMING_SNAKE_CASE = {"x": 3}
SCREAMING_SNAKE_CASE = evaluate(a , {"""add_two""": add_two} , state=a )
assert result == 5
self.assertDictEqual(a , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def _UpperCAmelCase ( self : Dict ) -> Tuple:
SCREAMING_SNAKE_CASE = "x = 0\nfor i in range(3):\n x = i"
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = evaluate(a , {"""range""": range} , state=a )
assert result == 2
self.assertDictEqual(a , {"""x""": 2, """i""": 2} )
| 706 |
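
# The snippet above tests a restricted Python interpreter: evaluate(code,
# tools, state) runs code with only whitelisted tools, mutates `state`, and
# returns the value of the last statement. A toy version capturing that
# contract (exec-based, for illustration only — never feed it untrusted input):
import ast

def tiny_evaluate(code: str, tools: dict, state: dict):
    env = {**tools, **state}
    result = None
    for node in ast.parse(code).body:
        if isinstance(node, ast.Expr):  # bare expression: evaluate it
            result = eval(compile(ast.Expression(node.value), "<code>", "eval"), env)
        else:  # statement: execute, and track assigned names as the result
            exec(compile(ast.Module(body=[node], type_ignores=[]), "<code>", "exec"), env)
            if isinstance(node, ast.Assign) and isinstance(node.targets[0], ast.Name):
                result = env[node.targets[0].id]
    state.update({k: v for k, v in env.items() if k not in tools and k != "__builtins__"})
    return result

tiny_state: dict = {}
assert tiny_evaluate("x = 3\ny = x + 2", {}, tiny_state) == 5 and tiny_state["y"] == 5
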
from __future__ import annotations
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
create_state_space_tree(SCREAMING_SNAKE_CASE , [] , 0 , [0 for i in range(len(SCREAMING_SNAKE_CASE ) )] )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ):
'''simple docstring'''
if index == len(SCREAMING_SNAKE_CASE ):
print(SCREAMING_SNAKE_CASE )
return
for i in range(len(SCREAMING_SNAKE_CASE ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
SCREAMING_SNAKE_CASE = True
create_state_space_tree(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index + 1 , SCREAMING_SNAKE_CASE )
current_sequence.pop()
SCREAMING_SNAKE_CASE = False
__A : list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
__A : list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 450 | 0 |
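
# The snippet above generates all permutations by backtracking with a
# used-index mask, printing each complete sequence; for n items that is n!
# lines. The standard-library equivalent, for comparison:
from itertools import permutations

assert len(list(permutations([3, 1, 2, 4]))) == 24  # 4! orderings, as printed above
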
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
UpperCAmelCase__ :List[str] = WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""])
def __lowercase (_lowercase ) -> Any:
"""simple docstring"""
__lowerCamelCase : str = test_results.split(""" """ )
__lowerCamelCase : List[str] = 0
__lowerCamelCase : Any = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
__lowerCamelCase : Union[str, Any] = expressions[-2] if """=""" in expressions[-1] else expressions[-1]
for i, expression in enumerate(_lowercase ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
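
# The helper above parses pytest's summary line (e.g. "2 failed, 30 passed in
# 12.34s"): it scans tokens so that each count immediately precedes its
# keyword. A quick check of that parsing convention on a sample line:
expressions = "2 failed, 30 passed in 12.34s".split(" ")
failed = sum(int(expressions[i - 1]) for i, e in enumerate(expressions) if "failed" in e)
assert failed == 2
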
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
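# Hypothetical `failures_short` excerpt in the shape the parser above assumes:
sample = (
    "_____ [doctest] transformers.models.bert.modeling_bert.BertModel.forward _____\n"
    "UNEXPECTED EXCEPTION: ValueError()\n"
)
print(extract_first_line_failure(sample))
# {'transformers.models.bert.modeling_bert.BertModel.forward': 'UNEXPECTED EXCEPTION: ValueError()'}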
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
        self.thread_ts = None

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":")
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }
    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 150 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def __lowercase () -> str:
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
mock = "__test_patch_submodule_mock__"
with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os, _PatchedModuleObj )
assert isinstance(_test_patching.os.path, _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path, _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os, _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path, _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def __lowercase () -> str:
"""simple docstring"""
assert _test_patching.open is open
mock = "__test_patch_submodule_builtin_mock__"
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching, "open", mock):
assert _test_patching.open is mock
# check that everything is back to normal when the patch is over
assert _test_patching.open is open
def __lowercase () -> Dict:
"""simple docstring"""
# pandas.read_csv is not present in _test_patching
mock = "__test_patch_submodule_missing_mock__"
with patch_submodule(_test_patching, "pandas.read_csv", mock):
pass
def __lowercase () -> Union[str, Any]:
"""simple docstring"""
# builtin should always be mocked even if they're not in the globals
# in case they're loaded at one point
mock = "__test_patch_submodule_missing_builtin_mock__"
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching, "len", None) is None
with patch_submodule(_test_patching, "len", mock):
assert _test_patching.len is mock
assert _test_patching.len is len
def __lowercase () -> List[str]:
"""simple docstring"""
mock = "__test_patch_submodule_start_and_stop_mock__"
patch = patch_submodule(_test_patching, "open", mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def __lowercase () -> List[Any]:
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
mock_join = "__test_patch_submodule_successive_join__"
mock_dirname = "__test_patch_submodule_successive_dirname__"
mock_rename = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching, "os.path.join", mock_join):
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching, "os.rename", mock_rename):
    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def __lowercase () -> List[Any]:
"""simple docstring"""
mock = "__test_patch_submodule_doesnt_exist_mock__"
with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
    pass
with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
pass
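# A minimal sketch of `patch_submodule` outside the test suite, pointed at a
# synthetic module object. The `types.ModuleType` target is an assumption made
# for illustration; the import path mirrors the one used in the tests above.
import os
import types

from datasets.utils.patching import patch_submodule

mymod = types.ModuleType("mymod")
mymod.os = os  # simulate a module that did `import os`

def fake_join(*parts):
    return "|".join(parts)

with patch_submodule(mymod, "os.path.join", fake_join):
    print(mymod.os.path.join("a", "b"))  # a|b
print(mymod.os.path.join("a", "b"))  # back to the real os.path.join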
| 150 | 1 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagoftoken(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        input_ids = tokenizer.encode(input_text)
        output_text = tokenizer.decode(input_ids)
        self.assertEqual(output_text, expected_text)
    @slow
    def test_prefix_input(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        ids_1 = tokenizer.encode(prefix_text + input_text)
        ids_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        ids_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        decoded_1 = tokenizer.decode(ids_1)
        decoded_2 = tokenizer.decode(ids_2)
        decoded_3 = tokenizer.decode(ids_3)
        self.assertEqual(decoded_1, expected_text)
        self.assertEqual(decoded_2, expected_text)
        self.assertEqual(decoded_3, expected_text)
    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_ids_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_1, expected_mask_1)
        self.assertListEqual(type_ids_2, expected_mask_2)
        self.assertListEqual(type_ids_3, expected_mask_3)
    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token
    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)
    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
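# Illustration of the prefix-LM inputs exercised above; downloads the
# Tanrei/GPTSAN-japanese tokenizer, so network access is assumed.
from transformers import GPTSanJapaneseTokenizer

tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
enc = tokenizer("いワ", prefix_text="あン")
print(enc.input_ids)
print(enc.token_type_ids)  # 1s over the prefix segment, then 0s over the text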
| 707 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # Create a list of PIL images from random uint8 arrays.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
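# Typical end-to-end use of CLIPProcessor with a public checkpoint
# (downloads openai/clip-vit-base-patch32, so network access is assumed):
import numpy as np
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="np", padding=True)
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']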
| 81 | 0 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
    raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")

# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory(args: Namespace):
    """Factory function used to instantiate the training command from provided command line arguments."""
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")
        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
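# Programmatic equivalent of `transformers-cli train ...` for the command above.
# The CSV path is hypothetical, and PyTorch or TensorFlow must be installed.
from argparse import Namespace

args = Namespace(
    train_data="train.csv", column_label=0, column_text=1, column_id=2,
    skip_first_row=False, validation_data="", validation_split=0.1,
    output="./trained", task="text_classification", model="bert-base-uncased",
    train_batch_size=32, valid_batch_size=64, learning_rate=3e-5, adam_epsilon=1e-08,
)
# TrainCommand(args).run()  # uncomment to load the CSV and fine-tune the pipeline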
| 278 |
from . import (
    albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart,
    barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird,
    bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_2, bloom,
    bridgetower, byt5, camembert, canine, chinese_clip, clap, clip, clipseg, codegen,
    conditional_detr, convbert, convnext, convnextv2, cpm, cpmant, ctrl, cvt, data2vec,
    deberta, deberta_v2, decision_transformer, deformable_detr, deit, deprecated, deta,
    detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer,
    efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon,
    flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpt2, gpt_bigcode,
    gpt_neo, gpt_neox, gpt_neox_japanese, gpt_sw3, gptj, gptsan_japanese, graphormer,
    groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox,
    layoutlm, layoutlmv2, layoutlmv3, layoutxlm, led, levit, lilt, llama, longformer,
    longt5, luke, lxmert, m2m_100, marian, markuplm, mask2former, maskformer, mbart,
    mbart50, mega, megatron_bert, megatron_gpt2, mgp_str, mluke, mobilebert,
    mobilenet_v1, mobilenet_v2, mobilevit, mobilevitv2, mpnet, mra, mt5, musicgen, mvp,
    nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt,
    owlvit, pegasus, pegasus_x, perceiver, phobert, pix2struct, plbart, poolformer,
    prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta,
    roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d,
    speech_encoder_decoder, speech_to_text, speech_to_text_2, speecht5, splinter,
    squeezebert, swiftformer, swin, swin2sr, swinv2, switch_transformers, t5,
    table_transformer, tapas, time_series_transformer, timesformer, timm_backbone,
    transfo_xl, trocr, tvlt, umt5, unispeech, unispeech_sat, upernet, videomae, vilt,
    vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid,
    vit_mae, vit_msn, vivit, wav2vec2, wav2vec2_conformer, wav2vec2_phoneme,
    wav2vec2_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta,
    xlm_roberta_xl, xlnet, xmod, yolos, yoso,
)
| 278 | 1 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
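# Composing the joint config from two sub-configs, as the class above supports.
# The small hyperparameter values are arbitrary choices for illustration.
from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

encoder_config = ViTConfig(hidden_size=96, num_hidden_layers=2, num_attention_heads=4)
decoder_config = BertConfig(hidden_size=96, num_hidden_layers=2, num_attention_heads=4)
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
print(config.model_type)  # vision-encoder-decoder
print(config.decoder.is_decoder)  # True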
| 116 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
if __name__ == "__main__":
    main()
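# The two staleness windows used above, restated in isolation with synthetic dates:
from datetime import datetime, timedelta

now = datetime.utcnow()
updated_at = now - timedelta(days=8)
created_at = now - timedelta(days=40)
close_after_ping = (now - updated_at).days > 7 and (now - created_at).days >= 30
mark_stale = (now - updated_at).days > 23 and (now - created_at).days >= 30
print(close_after_ping, mark_stale)  # True False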
| 116 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
    import jax

    from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
    from transformers.models.bert.modeling_flax_bert import FlaxBertModel
    from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)
    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
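# The remedy suggested by the last error message tested above: convert PyTorch
# weights on the fly when loading into Flax (assumes torch and flax are installed).
from transformers import FlaxAutoModel

model = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only", from_pt=True)
print(type(model).__name__)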
| 76 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
_DESCRIPTION = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
_KWARGS_DESCRIPTION = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Any:
'''simple docstring'''
__UpperCAmelCase : List[Any] = get_coref_infos(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
__UpperCAmelCase : Union[str, Any] = {}
__UpperCAmelCase : Union[str, Any] = 0
__UpperCAmelCase : str = 0
for name, metric in metrics:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = evaluator.evaluate_documents(lowercase_ , lowercase_ , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": fa} )
logger.info(
name.ljust(10 ) , f"Recall: {recall * 100:.2f}" , f" Precision: {precision * 100:.2f}" , f" F1: {fa * 100:.2f}" , )
if conll_subparts_num == 3:
__UpperCAmelCase : Optional[int] = (conll / 3) * 100
logger.info(f"CoNLL score: {conll:.2f}" )
output_scores.update({'''conll_score''': conll} )
return output_scores
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = False
for line in key_lines:
if not line.startswith('''#''' ):
if len(line.split() ) > 6:
__UpperCAmelCase : List[Any] = line.split()[5]
if parse_col != "-":
__UpperCAmelCase : Optional[Any] = True
break
else:
break
return has_gold_parse
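# For example, on the docstring sample above, `line.split()[5]` of the first word line
# is '(TOP(S(VP*' rather than '-', so the gold parse annotation is present and the
# 'min_span' option can be used with those references.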
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
def A( self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''')),
'''references''': datasets.Sequence(datasets.Value('''string''')),
}) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
def A( self , lowercase__ , lowercase__ , lowercase__=True , lowercase__=False , lowercase__=False , lowercase__=False):
__UpperCAmelCase : List[Any] = [
('''mentions''', evaluator.mentions),
('''muc''', evaluator.muc),
('''bcub''', evaluator.b_cubed),
('''ceafe''', evaluator.ceafe),
('''lea''', evaluator.lea),
]
if min_span:
__UpperCAmelCase : Optional[Any] = util.check_gold_parse_annotation(lowercase__)
if not has_gold_parse:
raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''')
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
__UpperCAmelCase : Tuple = evaluate(
key_lines=lowercase__ , sys_lines=lowercase__ , metrics=lowercase__ , NP_only=lowercase__ , remove_nested=lowercase__ , keep_singletons=lowercase__ , min_span=lowercase__ , )
return score
| 462 | 0 |
from heapq import heappop, heappush
import numpy as np
def UpperCAmelCase_ ( __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =grid.shape
SCREAMING_SNAKE_CASE__ =[-1, 1, 0, 0]
SCREAMING_SNAKE_CASE__ =[0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =[(0, source)], set()
SCREAMING_SNAKE_CASE__ =np.full((rows, cols), np.inf )
SCREAMING_SNAKE_CASE__ =0
SCREAMING_SNAKE_CASE__ =np.empty((rows, cols), dtype=__UpperCamelCase )
SCREAMING_SNAKE_CASE__ =None
while queue:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =heappop(__UpperCamelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
SCREAMING_SNAKE_CASE__ =[]
while (x, y) != source:
path.append((x, y) )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =predecessors[x, y]
path.append(__UpperCamelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(__UpperCamelCase ) ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
SCREAMING_SNAKE_CASE__ =grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(__UpperCamelCase, (dist + 1, (nx, ny)) )
SCREAMING_SNAKE_CASE__ =dist + 1
SCREAMING_SNAKE_CASE__ =(x, y)
return np.inf, []
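# A minimal sketch of the intended behavior of the shortest-path routine above,
# assuming an unmangled implementation (hypothetical 3x3 grid, 1 = passable,
# 0 = blocked; the function returns the distance to the destination and the path):
#
#   grid = np.array([[1, 1, 1],
#                    [0, 0, 1],
#                    [1, 1, 1]])
#   dist, path = UpperCAmelCase_(grid, (0, 0), (2, 0), False)
#   # dist == 6.0; path == [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)]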
if __name__ == "__main__":
import doctest
doctest.testmod()
| 588 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __a ( __lowerCamelCase ):
"""simple docstring"""
_A : List[Any] = "trocr"
_A : Optional[int] = ["past_key_values"]
_A : Dict = {
"num_attention_heads": "decoder_attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "decoder_layers",
}
def __init__( self : str ,_UpperCamelCase : Dict=5_0_2_6_5 ,_UpperCamelCase : int=1_0_2_4 ,_UpperCamelCase : Union[str, Any]=1_2 ,_UpperCamelCase : Union[str, Any]=1_6 ,_UpperCamelCase : List[Any]=4_0_9_6 ,_UpperCamelCase : str="gelu" ,_UpperCamelCase : Dict=5_1_2 ,_UpperCamelCase : List[Any]=0.1 ,_UpperCamelCase : Dict=0.0 ,_UpperCamelCase : Optional[int]=0.0 ,_UpperCamelCase : Union[str, Any]=2 ,_UpperCamelCase : str=0.02 ,_UpperCamelCase : Any=0.0 ,_UpperCamelCase : Optional[int]=True ,_UpperCamelCase : str=False ,_UpperCamelCase : int=True ,_UpperCamelCase : Tuple=True ,_UpperCamelCase : str=1 ,_UpperCamelCase : Optional[Any]=0 ,_UpperCamelCase : str=2 ,**_UpperCamelCase : Any ,) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =vocab_size
SCREAMING_SNAKE_CASE__ =d_model
SCREAMING_SNAKE_CASE__ =decoder_layers
SCREAMING_SNAKE_CASE__ =decoder_attention_heads
SCREAMING_SNAKE_CASE__ =decoder_ffn_dim
SCREAMING_SNAKE_CASE__ =activation_function
SCREAMING_SNAKE_CASE__ =max_position_embeddings
SCREAMING_SNAKE_CASE__ =dropout
SCREAMING_SNAKE_CASE__ =attention_dropout
SCREAMING_SNAKE_CASE__ =activation_dropout
SCREAMING_SNAKE_CASE__ =init_std
SCREAMING_SNAKE_CASE__ =decoder_layerdrop
SCREAMING_SNAKE_CASE__ =use_cache
SCREAMING_SNAKE_CASE__ =scale_embedding
SCREAMING_SNAKE_CASE__ =use_learned_position_embeddings
SCREAMING_SNAKE_CASE__ =layernorm_embedding
super().__init__(
pad_token_id=_UpperCamelCase ,bos_token_id=_UpperCamelCase ,eos_token_id=_UpperCamelCase ,decoder_start_token_id=_UpperCamelCase ,**_UpperCamelCase ,)
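# A minimal usage sketch of the config above (assuming it is the upstream
# `transformers.TrOCRConfig`, which the mangled class name `__a` hides here):
#
#   config = TrOCRConfig(d_model=512, decoder_layers=6, decoder_attention_heads=8)
#   assert config.hidden_size == config.d_model  # resolved via attribute_map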
| 588 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], a)
| 109 |
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
a = logging.get_logger(__name__)
def __magic_name__ ( __UpperCAmelCase=None , __UpperCAmelCase=None ) -> Dict:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=__UpperCAmelCase )
@dataclass
class __a :
__UpperCamelCase : List[str] = list_field(
default=[], metadata={
'help': (
'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'
' of all available models'
)
}, )
__UpperCamelCase : List[int] = list_field(
default=[8], metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'} )
__UpperCamelCase : List[int] = list_field(
default=[8, 32, 128, 512], metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'} )
__UpperCamelCase : bool = field(default=_snake_case, metadata={'help': 'Use FP16 to accelerate inference.'} )
__UpperCamelCase : bool = field(default=_snake_case, metadata={'help': 'Benchmark training of model'} )
__UpperCamelCase : bool = field(default=_snake_case, metadata={'help': 'Verbose memory tracing'} )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={
'help': 'Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'
}, )
__UpperCamelCase : bool = field(default=_snake_case, metadata={'help': 'Trace memory line by line'} )
__UpperCamelCase : bool = field(default=_snake_case, metadata={'help': 'Save result to a CSV file'} )
__UpperCamelCase : bool = field(default=_snake_case, metadata={'help': 'Save all print statements in a log file'} )
__UpperCamelCase : bool = field(default=_snake_case, metadata={'help': 'Whether to print environment information'} )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={
'help': (
'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'
' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'
' for debugging / testing and on TPU.'
)
}, )
__UpperCamelCase : str = field(
default=F'inference_time_{round(time() )}.csv', metadata={'help': 'CSV filename used if saving time results to csv.'}, )
__UpperCamelCase : str = field(
default=F'inference_memory_{round(time() )}.csv', metadata={'help': 'CSV filename used if saving memory results to csv.'}, )
__UpperCamelCase : str = field(
default=F'train_time_{round(time() )}.csv', metadata={'help': 'CSV filename used if saving time results to csv for training.'}, )
__UpperCamelCase : str = field(
default=F'train_memory_{round(time() )}.csv', metadata={'help': 'CSV filename used if saving memory results to csv for training.'}, )
__UpperCamelCase : str = field(
default=F'env_info_{round(time() )}.csv', metadata={'help': 'CSV filename used if saving environment information.'}, )
__UpperCamelCase : str = field(
default=F'log_{round(time() )}.csv', metadata={'help': 'Log filename used if print statements are saved in log.'}, )
__UpperCamelCase : int = field(default=3, metadata={'help': 'Times an experiment will be run.'} )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={
'help': (
'Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'
' model weights.'
)
}, )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
warnings.warn(
f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
""" are deprecated in general and it is advised to use external Benchmarking libraries """
""" to benchmark Transformer models.""" ,lowerCamelCase ,)
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
return json.dumps(dataclasses.asdict(self ) ,indent=2 )
@property
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
if len(self.models ) <= 0:
raise ValueError(
"""Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
""" bert-base-cased` or `args.models = ['bert-base-cased'].""" )
return self.models
@property
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("""Multiprocessing is currently not possible on TPU.""" )
return False
else:
return True
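# A minimal usage sketch (assuming this dataclass is the upstream
# `transformers.BenchmarkArguments`, parsed with `transformers.HfArgumentParser`):
#
#   from transformers import BenchmarkArguments, HfArgumentParser
#   parser = HfArgumentParser(BenchmarkArguments)
#   args = parser.parse_args_into_dataclasses(
#       ["--models", "bert-base-cased", "--batch_sizes", "8", "16"])[0]
#   args.model_names  # the property above validates that a model was provided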
| 109 | 1 |
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class snake_case ( lowercase_, lowercase_, lowercase_ ):
"""simple docstring"""
_a = [R"""h\.\d+\.attn\.bias""", R"""h\.\d+\.attn\.masked_bias"""]
@register_to_config
def __init__( self, _lowercase, _lowercase, _lowercase = None, _lowercase = 50257, _lowercase = 1024, _lowercase = 768, _lowercase = 12, _lowercase = 12, _lowercase = None, _lowercase = "gelu_new", _lowercase = 0.1, _lowercase = 0.1, _lowercase = 0.1, _lowercase = 1E-5, _lowercase = 0.02, _lowercase = True, _lowercase = True, _lowercase = False, _lowercase = False, ) -> int:
super().__init__()
SCREAMING_SNAKE_CASE_ = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
f""" `n_embd`: {n_embd} are not equal.""" )
SCREAMING_SNAKE_CASE_ = prefix_inner_dim
SCREAMING_SNAKE_CASE_ = prefix_hidden_dim
SCREAMING_SNAKE_CASE_ = (
nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
SCREAMING_SNAKE_CASE_ = (
nn.Linear(self.prefix_hidden_dim, lowerCamelCase_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
SCREAMING_SNAKE_CASE_ = GPTaConfig(
vocab_size=lowerCamelCase_, n_positions=lowerCamelCase_, n_embd=lowerCamelCase_, n_layer=lowerCamelCase_, n_head=lowerCamelCase_, n_inner=lowerCamelCase_, activation_function=lowerCamelCase_, resid_pdrop=lowerCamelCase_, embd_pdrop=lowerCamelCase_, attn_pdrop=lowerCamelCase_, layer_norm_epsilon=lowerCamelCase_, initializer_range=lowerCamelCase_, scale_attn_weights=lowerCamelCase_, use_cache=lowerCamelCase_, scale_attn_by_inverse_layer_idx=lowerCamelCase_, reorder_and_upcast_attn=lowerCamelCase_, )
SCREAMING_SNAKE_CASE_ = GPTaLMHeadModel(lowerCamelCase_ )
def a__ ( self, _lowercase, _lowercase, _lowercase = None, _lowercase = None, ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.transformer.transformer.wte(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ = self.encode_prefix(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ = self.decode_prefix(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ = torch.cat((prefix_embeds, embedding_text), dim=1 )
if labels is not None:
SCREAMING_SNAKE_CASE_ = self.get_dummy_token(input_ids.shape[0], input_ids.device )
SCREAMING_SNAKE_CASE_ = torch.cat((dummy_token, input_ids), dim=1 )
SCREAMING_SNAKE_CASE_ = self.transformer(inputs_embeds=lowerCamelCase_, labels=lowerCamelCase_, attention_mask=lowerCamelCase_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def a__ ( self, _lowercase, _lowercase ) -> str:
return torch.zeros(lowerCamelCase_, self.prefix_length, dtype=torch.int64, device=lowerCamelCase_ )
def a__ ( self, _lowercase ) -> List[Any]:
return self.encode_prefix(lowerCamelCase_ )
@torch.no_grad()
def a__ ( self, _lowercase, _lowercase, _lowercase ) -> Dict:
SCREAMING_SNAKE_CASE_ = torch.split(lowerCamelCase_, 1, dim=0 )
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
for feature in features:
SCREAMING_SNAKE_CASE_ = self.decode_prefix(feature.to(lowerCamelCase_ ) ) # back to the clip feature
# Only support beam search for now
SCREAMING_SNAKE_CASE_ = self.generate_beam(
input_embeds=lowerCamelCase_, device=lowerCamelCase_, eos_token_id=lowerCamelCase_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
SCREAMING_SNAKE_CASE_ = torch.stack(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ = torch.stack(lowerCamelCase_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def a__ ( self, _lowercase=None, _lowercase=None, _lowercase=None, _lowercase = 5, _lowercase = 67, _lowercase = 1.0, _lowercase = None, ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = eos_token_id
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = torch.ones(lowerCamelCase_, device=lowerCamelCase_, dtype=torch.int )
SCREAMING_SNAKE_CASE_ = torch.zeros(lowerCamelCase_, device=lowerCamelCase_, dtype=torch.bool )
if input_embeds is not None:
SCREAMING_SNAKE_CASE_ = input_embeds
else:
SCREAMING_SNAKE_CASE_ = self.transformer.transformer.wte(lowerCamelCase_ )
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ = self.transformer(inputs_embeds=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ = outputs.logits
SCREAMING_SNAKE_CASE_ = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
SCREAMING_SNAKE_CASE_ = logits.softmax(-1 ).log()
if scores is None:
SCREAMING_SNAKE_CASE_ = logits.topk(lowerCamelCase_, -1 )
SCREAMING_SNAKE_CASE_ = generated.expand(lowerCamelCase_, *generated.shape[1:] )
SCREAMING_SNAKE_CASE_ = next_tokens.permute(1, 0 ), scores.squeeze(0 )
if tokens is None:
SCREAMING_SNAKE_CASE_ = next_tokens
else:
SCREAMING_SNAKE_CASE_ = tokens.expand(lowerCamelCase_, *tokens.shape[1:] )
SCREAMING_SNAKE_CASE_ = torch.cat((tokens, next_tokens), dim=1 )
else:
SCREAMING_SNAKE_CASE_ = -float(np.inf )
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
SCREAMING_SNAKE_CASE_ = scores_sum / seq_lengths[:, None]
SCREAMING_SNAKE_CASE_ = scores_sum_average.view(-1 ).topk(lowerCamelCase_, -1 )
SCREAMING_SNAKE_CASE_ = next_tokens // scores_sum.shape[1]
SCREAMING_SNAKE_CASE_ = seq_lengths[next_tokens_source]
SCREAMING_SNAKE_CASE_ = next_tokens % scores_sum.shape[1]
SCREAMING_SNAKE_CASE_ = next_tokens.unsqueeze(1 )
SCREAMING_SNAKE_CASE_ = tokens[next_tokens_source]
SCREAMING_SNAKE_CASE_ = torch.cat((tokens, next_tokens), dim=1 )
SCREAMING_SNAKE_CASE_ = generated[next_tokens_source]
SCREAMING_SNAKE_CASE_ = scores_sum_average * seq_lengths
SCREAMING_SNAKE_CASE_ = is_stopped[next_tokens_source]
SCREAMING_SNAKE_CASE_ = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0], 1, -1 )
SCREAMING_SNAKE_CASE_ = torch.cat((generated, next_token_embed), dim=1 )
SCREAMING_SNAKE_CASE_ = is_stopped + next_tokens.eq(lowerCamelCase_ ).squeeze()
if is_stopped.all():
break
SCREAMING_SNAKE_CASE_ = scores / seq_lengths
SCREAMING_SNAKE_CASE_ = scores.argsort(descending=lowerCamelCase_ )
# tokens tensors are already padded to max_seq_length
SCREAMING_SNAKE_CASE_ = [tokens[i] for i in order]
SCREAMING_SNAKE_CASE_ = torch.stack(lowerCamelCase_, dim=0 )
SCREAMING_SNAKE_CASE_ = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype )
return output_texts, seq_lengths
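# A minimal sketch of the caption-generation flow above (names are hypothetical;
# upstream this class is `UniDiffuserTextDecoder` in diffusers):
#
#   features = torch.randn(2, prefix_inner_dim)  # e.g. CLIP image features
#   tokens, lengths = decoder.generate_captions(features, eos_token_id, device)
#
# Each feature is decoded independently with beam search via `generate_beam`,
# and the best beam per feature is returned together with its sequence length.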
| 713 |
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' ,[None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' ,['default', 0, 100 * 2**20, 900 * 2**20] )
def _UpperCamelCase ( lowerCAmelCase__: Union[str, Any] ,lowerCAmelCase__: Optional[int] ,lowerCAmelCase__: List[Any] ) -> List[str]:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config ,'IN_MEMORY_MAX_SIZE' ,lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
SCREAMING_SNAKE_CASE_ = dataset_size < in_memory_max_size
else:
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = is_small_dataset(lowerCAmelCase__ )
assert result == expected
| 238 | 0 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
UpperCamelCase : List[Any] = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def A__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] ):
inspect_dataset(__lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = path + """.py"""
assert script_name in os.listdir(__lowerCAmelCase )
assert "__pycache__" not in os.listdir(__lowerCAmelCase )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : int ):
inspect_metric(__lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = path + """.py"""
assert script_name in os.listdir(__lowerCAmelCase )
assert "__pycache__" not in os.listdir(__lowerCAmelCase )
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] ):
lowerCamelCase__ = get_dataset_config_info(__lowerCAmelCase , config_name=__lowerCAmelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] ):
with pytest.raises(__lowerCAmelCase ):
get_dataset_config_info(__lowerCAmelCase , config_name=__lowerCAmelCase )
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def A__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
lowerCamelCase__ = get_dataset_config_names(__lowerCAmelCase )
assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any ):
lowerCamelCase__ = get_dataset_infos(__lowerCAmelCase )
assert list(infos.keys() ) == expected_configs
lowerCamelCase__ = expected_configs[0]
assert expected_config in infos
lowerCamelCase__ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] ):
lowerCamelCase__ = get_dataset_infos(__lowerCAmelCase )
assert expected_config in infos
lowerCamelCase__ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ):
with pytest.raises(__lowerCAmelCase ):
get_dataset_split_names(__lowerCAmelCase , config_name=__lowerCAmelCase )
| 50 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
a : Any = logging.get_logger(__name__)
class a ( _lowerCamelCase ):
snake_case_ = "linear"
snake_case_ = "cosine"
snake_case_ = "cosine_with_restarts"
snake_case_ = "polynomial"
snake_case_ = "constant"
snake_case_ = "constant_with_warmup"
snake_case_ = "piecewise_constant"
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase = -1 ) -> Dict:
'''simple docstring'''
return LambdaLR(__UpperCAmelCase, lambda __UpperCAmelCase : 1, last_epoch=__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase = -1 ) -> Dict:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1.0, __UpperCAmelCase ) )
return 1.0
return LambdaLR(__UpperCAmelCase, __UpperCAmelCase, last_epoch=__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase = -1 ) -> Any:
'''simple docstring'''
snake_case_ = {}
snake_case_ = step_rules.split(''',''' )
for rule_str in rule_list[:-1]:
snake_case_ ,snake_case_ = rule_str.split(''':''' )
snake_case_ = int(__UpperCAmelCase )
snake_case_ = float(__UpperCAmelCase )
snake_case_ = value
snake_case_ = float(rule_list[-1] )
def create_rules_function(__UpperCAmelCase, __UpperCAmelCase ):
def rule_func(__UpperCAmelCase ) -> float:
snake_case_ = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__UpperCAmelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
snake_case_ = create_rules_function(__UpperCAmelCase, __UpperCAmelCase )
return LambdaLR(__UpperCAmelCase, __UpperCAmelCase, last_epoch=__UpperCAmelCase )
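# For example, step_rules="1:10,0.1:20,0.01" is parsed above into the mapping
# {10: 1.0, 20: 0.1} with a final multiple of 0.01: the learning rate is multiplied
# by 1.0 for steps below 10, by 0.1 for steps 10-19, and by 0.01 from step 20 on.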
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase=-1 ) -> List[Any]:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1, __UpperCAmelCase ) )
return max(
0.0, float(num_training_steps - current_step ) / float(max(1, num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase = 0.5, __UpperCAmelCase = -1 ) -> Optional[Any]:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1, __UpperCAmelCase ) )
snake_case_ = float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(__UpperCAmelCase ) * 2.0 * progress )) )
return LambdaLR(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase = 1, __UpperCAmelCase = -1 ) -> Dict:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1, __UpperCAmelCase ) )
snake_case_ = float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(__UpperCAmelCase ) * progress) % 1.0) )) )
return LambdaLR(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase=1e-7, __UpperCAmelCase=1.0, __UpperCAmelCase=-1 ) -> str:
'''simple docstring'''
snake_case_ = optimizer.defaults['''lr''']
if not (lr_init > lr_end):
raise ValueError(F"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})" )
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1, __UpperCAmelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
snake_case_ = lr_init - lr_end
snake_case_ = num_training_steps - num_warmup_steps
snake_case_ = 1 - (current_step - num_warmup_steps) / decay_steps
snake_case_ = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
a : Tuple = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase = None, __UpperCAmelCase = None, __UpperCAmelCase = None, __UpperCAmelCase = 1, __UpperCAmelCase = 1.0, __UpperCAmelCase = -1, ) -> int:
'''simple docstring'''
snake_case_ = SchedulerType(__UpperCAmelCase )
snake_case_ = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__UpperCAmelCase, last_epoch=__UpperCAmelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__UpperCAmelCase, step_rules=__UpperCAmelCase, last_epoch=__UpperCAmelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"{name} requires `num_warmup_steps`, please provide that argument." )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__UpperCAmelCase, num_warmup_steps=__UpperCAmelCase, last_epoch=__UpperCAmelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"{name} requires `num_training_steps`, please provide that argument." )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__UpperCAmelCase, num_warmup_steps=__UpperCAmelCase, num_training_steps=__UpperCAmelCase, num_cycles=__UpperCAmelCase, last_epoch=__UpperCAmelCase, )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__UpperCAmelCase, num_warmup_steps=__UpperCAmelCase, num_training_steps=__UpperCAmelCase, power=__UpperCAmelCase, last_epoch=__UpperCAmelCase, )
return schedule_func(
__UpperCAmelCase, num_warmup_steps=__UpperCAmelCase, num_training_steps=__UpperCAmelCase, last_epoch=__UpperCAmelCase )
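# A minimal usage sketch of the dispatcher above (assuming it is the upstream
# `get_scheduler` from diffusers.optimization; `model` is a hypothetical module):
#
#   import torch
#   optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
#   lr_scheduler = get_scheduler("cosine", optimizer,
#                                num_warmup_steps=500, num_training_steps=10_000)
#   for _ in range(10_000):
#       ...  # forward/backward pass
#       optimizer.step()
#       lr_scheduler.step()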
| 640 | 0 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
__snake_case : Optional[Any] = (UnCLIPScheduler,)
def __lowercase ( self :Any , **__lowercase :str ):
__lowerCamelCase : Optional[int] ={
'''num_train_timesteps''': 1000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**__lowercase )
return config
def __lowercase ( self :Tuple ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__lowercase )
def __lowercase ( self :Tuple ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__lowercase )
def __lowercase ( self :List[str] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowercase )
def __lowercase ( self :List[Any] ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=__lowercase )
def __lowercase ( self :Any ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__lowercase )
def __lowercase ( self :Union[str, Any] ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__lowercase , prev_timestep=__lowercase )
def __lowercase ( self :Union[str, Any] ):
__lowerCamelCase : List[Any] =self.scheduler_classes[0]
__lowerCamelCase : str =self.get_scheduler_config(variance_type='''fixed_small_log''' )
__lowerCamelCase : Tuple =scheduler_class(**__lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_0_0_0e-1_0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0549625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9994987 ) ) < 1e-5
def __lowercase ( self :Optional[Any] ):
__lowerCamelCase : Tuple =self.scheduler_classes[0]
__lowerCamelCase : Optional[Any] =self.get_scheduler_config(variance_type='''learned_range''' )
__lowerCamelCase : Optional[Any] =scheduler_class(**__lowercase )
__lowerCamelCase : Optional[int] =0.5
assert scheduler._get_variance(1 , predicted_variance=__lowercase ) - -10.1712790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=__lowercase ) - -5.7998052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=__lowercase ) - -0.0010011 < 1e-5
def __lowercase ( self :Optional[Any] ):
__lowerCamelCase : Any =self.scheduler_classes[0]
__lowerCamelCase : Optional[int] =self.get_scheduler_config()
__lowerCamelCase : str =scheduler_class(**__lowercase )
__lowerCamelCase : Tuple =scheduler.timesteps
__lowerCamelCase : Any =self.dummy_model()
__lowerCamelCase : Any =self.dummy_sample_deter
__lowerCamelCase : List[Any] =torch.manual_seed(0 )
for i, t in enumerate(__lowercase ):
# 1. predict noise residual
__lowerCamelCase : Tuple =model(__lowercase , __lowercase )
# 2. predict previous mean of sample x_t-1
__lowerCamelCase : List[Any] =scheduler.step(__lowercase , __lowercase , __lowercase , generator=__lowercase ).prev_sample
__lowerCamelCase : List[Any] =pred_prev_sample
__lowerCamelCase : Optional[Any] =torch.sum(torch.abs(__lowercase ) )
__lowerCamelCase : Optional[int] =torch.mean(torch.abs(__lowercase ) )
assert abs(result_sum.item() - 252.2682495 ) < 1e-2
assert abs(result_mean.item() - 0.3284743 ) < 1e-3
def __lowercase ( self :int ):
__lowerCamelCase : Optional[Any] =self.scheduler_classes[0]
__lowerCamelCase : Any =self.get_scheduler_config()
__lowerCamelCase : Optional[Any] =scheduler_class(**__lowercase )
scheduler.set_timesteps(25 )
__lowerCamelCase : Dict =scheduler.timesteps
__lowerCamelCase : List[str] =self.dummy_model()
__lowerCamelCase : List[str] =self.dummy_sample_deter
__lowerCamelCase : Any =torch.manual_seed(0 )
for i, t in enumerate(__lowercase ):
# 1. predict noise residual
__lowerCamelCase : Optional[Any] =model(__lowercase , __lowercase )
if i + 1 == timesteps.shape[0]:
__lowerCamelCase : Any =None
else:
__lowerCamelCase : Union[str, Any] =timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__lowerCamelCase : List[str] =scheduler.step(
__lowercase , __lowercase , __lowercase , prev_timestep=__lowercase , generator=__lowercase ).prev_sample
__lowerCamelCase : int =pred_prev_sample
__lowerCamelCase : List[Any] =torch.sum(torch.abs(__lowercase ) )
__lowerCamelCase : List[str] =torch.mean(torch.abs(__lowercase ) )
assert abs(result_sum.item() - 258.2044983 ) < 1e-2
assert abs(result_mean.item() - 0.3362038 ) < 1e-3
def __lowercase ( self :List[str] ):
pass
def __lowercase ( self :Tuple ):
pass
| 363 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
__snake_case : Dict = CycleDiffusionPipeline
__snake_case : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"""negative_prompt""",
"""height""",
"""width""",
"""negative_prompt_embeds""",
}
__snake_case : Any = PipelineTesterMixin.required_optional_params - {"""latents"""}
__snake_case : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""source_prompt"""} )
__snake_case : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
__snake_case : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowercase ( self :Dict ):
torch.manual_seed(0 )
__lowerCamelCase : List[str] =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__lowerCamelCase : List[Any] =DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=1000 , clip_sample=__lowercase , set_alpha_to_one=__lowercase , )
torch.manual_seed(0 )
__lowerCamelCase : Optional[int] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__lowerCamelCase : int =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__lowerCamelCase : Optional[Any] =CLIPTextModel(__lowercase )
__lowerCamelCase : Tuple =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__lowerCamelCase : Optional[int] ={
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __lowercase ( self :Union[str, Any] , __lowercase :Optional[int] , __lowercase :str=0 ):
__lowerCamelCase : List[Any] =floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowercase ) ).to(__lowercase )
__lowerCamelCase : str =image / 2 + 0.5
if str(__lowercase ).startswith('''mps''' ):
__lowerCamelCase : Union[str, Any] =torch.manual_seed(__lowercase )
else:
__lowerCamelCase : Any =torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__lowerCamelCase : Dict ={
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def __lowercase ( self :Optional[int] ):
__lowerCamelCase : int ='''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Tuple =self.get_dummy_components()
__lowerCamelCase : List[str] =CycleDiffusionPipeline(**__lowercase )
__lowerCamelCase : Tuple =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCamelCase : List[Any] =self.get_dummy_inputs(__lowercase )
__lowerCamelCase : int =pipe(**__lowercase )
__lowerCamelCase : Dict =output.images
__lowerCamelCase : Union[str, Any] =images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__lowerCamelCase : Optional[Any] =np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __lowercase ( self :str ):
__lowerCamelCase : str =self.get_dummy_components()
for name, module in components.items():
if hasattr(__lowercase , '''half''' ):
__lowerCamelCase : Union[str, Any] =module.half()
__lowerCamelCase : int =CycleDiffusionPipeline(**__lowercase )
__lowerCamelCase : List[str] =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCamelCase : Optional[int] =self.get_dummy_inputs(__lowercase )
__lowerCamelCase : Dict =pipe(**__lowercase )
__lowerCamelCase : List[str] =output.images
__lowerCamelCase : Dict =images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__lowerCamelCase : str =np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def __lowercase ( self :Optional[Any] ):
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def __lowercase ( self :Dict ):
return super().test_inference_batch_single_identical()
@skip_mps
def __lowercase ( self :Optional[Any] ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __lowercase ( self :str ):
return super().test_save_load_optional_components()
@skip_mps
def __lowercase ( self :Dict ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self :Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self :Dict ):
__lowerCamelCase : Dict =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__lowerCamelCase : Union[str, Any] =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
__lowerCamelCase : Any =init_image.resize((512, 512) )
__lowerCamelCase : Optional[Any] ='''CompVis/stable-diffusion-v1-4'''
__lowerCamelCase : Optional[Any] =DDIMScheduler.from_pretrained(__lowercase , subfolder='''scheduler''' )
__lowerCamelCase : Optional[Any] =CycleDiffusionPipeline.from_pretrained(
__lowercase , scheduler=__lowercase , safety_checker=__lowercase , torch_dtype=torch.float16 , revision='''fp16''' )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
pipe.enable_attention_slicing()
__lowerCamelCase : Dict ='''A black colored car'''
__lowerCamelCase : Union[str, Any] ='''A blue colored car'''
__lowerCamelCase : Dict =torch.manual_seed(0 )
__lowerCamelCase : Tuple =pipe(
prompt=__lowercase , source_prompt=__lowercase , image=__lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__lowercase , output_type='''np''' , )
__lowerCamelCase : Tuple =output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def __lowercase ( self :Any ):
__lowerCamelCase : Tuple =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__lowerCamelCase : List[Any] =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
__lowerCamelCase : Optional[Any] =init_image.resize((512, 512) )
__lowerCamelCase : Any ='''CompVis/stable-diffusion-v1-4'''
__lowerCamelCase : List[Any] =DDIMScheduler.from_pretrained(__lowercase , subfolder='''scheduler''' )
__lowerCamelCase : str =CycleDiffusionPipeline.from_pretrained(__lowercase , scheduler=__lowercase , safety_checker=__lowercase )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
pipe.enable_attention_slicing()
__lowerCamelCase : Any ='''A black colored car'''
__lowerCamelCase : int ='''A blue colored car'''
__lowerCamelCase : Tuple =torch.manual_seed(0 )
__lowerCamelCase : Union[str, Any] =pipe(
prompt=__lowercase , source_prompt=__lowercase , image=__lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__lowercase , output_type='''np''' , )
__lowerCamelCase : Dict =output.images
assert np.abs(image - expected_image ).max() < 2e-2
| 363 | 1 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__snake_case : Dict = None
__snake_case : Union[str, Any] = logging.get_logger(__name__)
__snake_case : List[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__snake_case : List[str] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__snake_case : List[str] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case = ['input_ids', 'attention_mask']
__snake_case = TaTokenizer
__snake_case = []
def __init__( self : List[str] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Any="</s>" , lowerCAmelCase_ : Union[str, Any]="<unk>" , lowerCAmelCase_ : List[str]="<pad>" , lowerCAmelCase_ : Any=1_00 , lowerCAmelCase_ : List[str]=None , **lowerCAmelCase_ : Tuple , ) -> Optional[Any]:
'''simple docstring'''
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
A__ : Tuple =[f"<extra_id_{i}>" for i in range(__UpperCamelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
A__ : int =len(set(filter(lambda lowerCAmelCase_ : bool("""extra_id_""" in str(__UpperCamelCase ) ) , __UpperCamelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
super().__init__(
__UpperCamelCase , tokenizer_file=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , extra_ids=__UpperCamelCase , additional_special_tokens=__UpperCamelCase , **__UpperCamelCase , )
A__ : str =vocab_file
A__ : List[Any] =False if not self.vocab_file else True
A__ : List[str] =extra_ids
@staticmethod
def lowercase__ ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] ) -> Tuple:
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
A__ : Dict =TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
f" {pretrained_model_name_or_path} automatically truncating your input to"
f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""" , __UpperCamelCase , )
return max_model_length
def lowercase__ ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Optional[int]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__UpperCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
A__ : List[Any] =os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ):
copyfile(self.vocab_file , __UpperCamelCase )
logger.info(f"Copy vocab file to {out_vocab_file}" )
return (out_vocab_file,)
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> Union[str, Any]:
'''simple docstring'''
A__ : Union[str, Any] =token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
A__ : Any =token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def lowercase__ ( self : Dict , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> Union[str, Any]:
'''simple docstring'''
A__ : str =[self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
return list(
set(filter(lambda lowerCAmelCase_ : bool(re.search(R"""<extra_id_\d+>""" , __UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) )
def lowercase__ ( self : List[str] ) -> int:
'''simple docstring'''
return [self.convert_tokens_to_ids(__UpperCamelCase ) for token in self.get_sentinel_tokens()]
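# A minimal usage sketch (assuming the class above is the upstream
# `transformers.T5TokenizerFast`):
#
#   tok = T5TokenizerFast.from_pretrained("t5-small")
#   ids = tok("translate English to German: Hello").input_ids  # eos (</s>) appended
#   sentinels = tok.get_sentinel_tokens()  # the 100 '<extra_id_*>' tokens by default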
| 215 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A:
def __init__( self : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any]=1_3 , __UpperCamelCase : str=3_2 , __UpperCamelCase : List[Any]=2 , __UpperCamelCase : Tuple=3 , __UpperCamelCase : List[Any]=1_6 , __UpperCamelCase : int=[3_2, 6_4, 1_2_8] , __UpperCamelCase : Tuple=[1, 2, 1] , __UpperCamelCase : List[Any]=[2, 2, 4] , __UpperCamelCase : int=2 , __UpperCamelCase : Tuple=2.0 , __UpperCamelCase : List[str]=True , __UpperCamelCase : List[Any]=0.0 , __UpperCamelCase : List[Any]=0.0 , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : Any="gelu" , __UpperCamelCase : int=False , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Tuple=0.02 , __UpperCamelCase : Union[str, Any]=1E-5 , __UpperCamelCase : Dict=True , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Optional[int]=1_0 , __UpperCamelCase : Union[str, Any]=8 , __UpperCamelCase : Optional[Any]=["stage1", "stage2"] , __UpperCamelCase : Tuple=[1, 2] , ):
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = embed_dim
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = depths
lowerCamelCase_ = num_heads
lowerCamelCase_ = window_size
lowerCamelCase_ = mlp_ratio
lowerCamelCase_ = qkv_bias
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = drop_path_rate
lowerCamelCase_ = hidden_act
lowerCamelCase_ = use_absolute_embeddings
lowerCamelCase_ = patch_norm
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = initializer_range
lowerCamelCase_ = is_training
lowerCamelCase_ = scope
lowerCamelCase_ = use_labels
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = encoder_stride
lowerCamelCase_ = out_features
lowerCamelCase_ = out_indices
def lowercase__ ( self : Tuple ):
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : Dict ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def lowercase__ ( self : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] ):
lowerCamelCase_ = FocalNetModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCamelCase_ = model(__UpperCamelCase )
lowerCamelCase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCamelCase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] ):
lowerCamelCase_ = FocalNetBackbone(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCamelCase_ = model(__UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowerCamelCase_ = None
lowerCamelCase_ = FocalNetBackbone(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCamelCase_ = model(__UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : Any ):
lowerCamelCase_ = FocalNetForMaskedImageModeling(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCamelCase_ = model(__UpperCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = FocalNetForMaskedImageModeling(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(__UpperCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowercase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] ):
lowerCamelCase_ = self.type_sequence_label_size
lowerCamelCase_ = FocalNetForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCamelCase_ = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = FocalNetForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : Tuple ):
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class FocalNetModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = (
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def lowercase__ ( self : str ):
lowerCamelCase_ = FocalNetModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=FocalNetConfig , embed_dim=3_7 , has_text_modality=False )
def lowercase__ ( self : Optional[Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self : Tuple ):
return
def lowercase__ ( self : Optional[int] ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowercase__ ( self : Any ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__UpperCamelCase )
def lowercase__ ( self : List[Any] ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCamelCase )
def lowercase__ ( self : List[Any] ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def lowercase__ ( self : str ):
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def lowercase__ ( self : List[str] ):
pass
def lowercase__ ( self : Optional[int] ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
model = model_class(config )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def lowercase__ ( self : Tuple ):
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = model_class(__UpperCamelCase )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowercase__ ( self : Dict , __UpperCamelCase : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] ):
lowerCamelCase_ = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
lowerCamelCase_ = outputs.hidden_states
lowerCamelCase_ = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
# FocalNet has a different seq_length
lowerCamelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCamelCase_ = outputs.reshaped_hidden_states
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = reshaped_hidden_states[0].shape
lowerCamelCase_ = (
reshaped_hidden_states[0].view(__UpperCamelCase , __UpperCamelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowercase__ ( self : int ):
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : List[str] ):
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , (padded_height, padded_width) )
@slow
def lowercase__ ( self : Dict ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = FocalNetModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def lowercase__ ( self : int ):
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(config=__UpperCamelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class __A( unittest.TestCase ):
@cached_property
def lowercase__ ( self : int ):
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def lowercase__ ( self : str ):
lowerCamelCase_ = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(__UpperCamelCase )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowerCamelCase_ = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**__UpperCamelCase )
# verify the logits
lowerCamelCase_ = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
lowerCamelCase_ = torch.tensor([0.2166, -0.4368, 0.2191] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 2_8_1 )
@require_torch
class FocalNetBackboneTest( BackboneTesterMixin , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (FocalNetBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE = FocalNetConfig
SCREAMING_SNAKE_CASE = False
def lowercase__ ( self : Any ):
lowerCamelCase_ = FocalNetModelTester(self )
| 272 | 0 |
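# A minimal sketch (not from the original test file above) of the backbone
# behaviour those FocalNet tests verify: a randomly initialised FocalNetBackbone
# returns one feature map per requested stage, and `model.channels` reports the
# channel count of each. Config values here are illustrative library defaults.
import torch
from transformers import FocalNetBackbone, FocalNetConfig

config = FocalNetConfig(out_features=["stage1", "stage2"])
model = FocalNetBackbone(config)
model.eval()

pixel_values = torch.randn(1, config.num_channels, config.image_size, config.image_size)
with torch.no_grad():
    outputs = model(pixel_values)

assert len(outputs.feature_maps) == len(config.out_features)
assert len(model.channels) == len(config.out_features)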
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
"""simple docstring"""
def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=2 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.0_2 , _snake_case=3 , _snake_case=4 , _snake_case=None , _snake_case=10_00 , ):
"""simple docstring"""
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = range_bbox
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__lowerCamelCase = bbox[i, j, 3]
__lowerCamelCase = bbox[i, j, 1]
__lowerCamelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__lowerCamelCase = bbox[i, j, 2]
__lowerCamelCase = bbox[i, j, 0]
__lowerCamelCase = t
__lowerCamelCase = tf.convert_to_tensor(_snake_case )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
__lowerCamelCase = TFLayoutLMModel(config=_snake_case )
__lowerCamelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case )
__lowerCamelCase = model(_snake_case , _snake_case , token_type_ids=_snake_case )
__lowerCamelCase = model(_snake_case , _snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowerCamelCase ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
__lowerCamelCase = TFLayoutLMForMaskedLM(config=_snake_case )
__lowerCamelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
__lowerCamelCase = self.num_labels
__lowerCamelCase = TFLayoutLMForSequenceClassification(config=_snake_case )
__lowerCamelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
__lowerCamelCase = self.num_labels
__lowerCamelCase = TFLayoutLMForTokenClassification(config=_snake_case )
__lowerCamelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
__lowerCamelCase = TFLayoutLMForQuestionAnswering(config=_snake_case )
__lowerCamelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class TFLayoutLMModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE_ = (
{
'feature-extraction': TFLayoutLMModel,
'fill-mask': TFLayoutLMForMaskedLM,
'text-classification': TFLayoutLMForSequenceClassification,
'token-classification': TFLayoutLMForTokenClassification,
'zero-shot': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = 10
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = TFLayoutLMModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=LayoutLMConfig , hidden_size=37 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_snake_case )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_snake_case )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = TFLayoutLMModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@unittest.skip('''Onnx compliancy broke with TF 2.10''' )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
def prepare_layoutlm_batch_inputs():
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
__lowerCamelCase = tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]] ) # noqa: E231
__lowerCamelCase = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
__lowerCamelCase = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]] ) # noqa: E231
__lowerCamelCase = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
__lowerCamelCase = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = TFLayoutLMModel.from_pretrained('''microsoft/layoutlm-base-uncased''' )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = prepare_layoutlm_batch_inputs()
# forward pass
__lowerCamelCase = model(input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case )
# test the sequence output on [0, :3, :3]
__lowerCamelCase = tf.convert_to_tensor(
[[0.1_7_8_5, -0.1_9_4_7, -0.0_4_2_5], [-0.3_2_5_4, -0.2_8_0_7, 0.2_5_5_3], [-0.5_3_9_1, -0.3_3_2_2, 0.3_3_6_4]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _snake_case , atol=1E-3 ) )
# test the pooled output on [1, :3]
__lowerCamelCase = tf.convert_to_tensor([-0.6_5_8_0, -0.0_2_1_4, 0.8_5_5_2] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _snake_case , atol=1E-3 ) )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = TFLayoutLMForSequenceClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=2 )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = prepare_layoutlm_batch_inputs()
# forward pass
__lowerCamelCase = model(
input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
__lowerCamelCase = outputs.loss
__lowerCamelCase = (2,)
self.assertEqual(loss.shape , _snake_case )
# test the shape of the logits
__lowerCamelCase = outputs.logits
__lowerCamelCase = (2, 2)
self.assertEqual(logits.shape , _snake_case )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = TFLayoutLMForTokenClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=13 )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = prepare_layoutlm_batch_inputs()
# forward pass
__lowerCamelCase = model(
input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case )
# test the shape of the logits
__lowerCamelCase = outputs.logits
__lowerCamelCase = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , _snake_case )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = TFLayoutLMForQuestionAnswering.from_pretrained('''microsoft/layoutlm-base-uncased''' )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = prepare_layoutlm_batch_inputs()
# forward pass
__lowerCamelCase = model(input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case )
# test the shape of the logits
__lowerCamelCase = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , _snake_case )
self.assertEqual(outputs.end_logits.shape , _snake_case )
| 575 |
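# A short illustrative sketch (not part of the test file above) of the bbox
# "legalisation" loop in prepare_config_and_inputs: LayoutLM boxes are
# (x0, y0, x1, y1) and must satisfy x0 <= x1 and y0 <= y1, so randomly drawn
# corners are swapped where they violate that. np.sort over each coordinate
# pair is an equivalent vectorised form of the nested-loop swap used above.
import numpy as np

rng = np.random.default_rng(0)
bbox = rng.integers(0, 1000, size=(2, 7, 4))  # (batch, seq_len, 4)

bbox[..., [0, 2]] = np.sort(bbox[..., [0, 2]], axis=-1)  # enforce x0 <= x1
bbox[..., [1, 3]] = np.sort(bbox[..., [1, 3]], axis=-1)  # enforce y0 <= y1

assert (bbox[..., 2] >= bbox[..., 0]).all() and (bbox[..., 3] >= bbox[..., 1]).all()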
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_bigbird_pegasus": [
"BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BigBirdPegasusConfig",
"BigBirdPegasusOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_bigbird_pegasus"] = [
"BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdPegasusForCausalLM",
"BigBirdPegasusForConditionalGeneration",
"BigBirdPegasusForQuestionAnswering",
"BigBirdPegasusForSequenceClassification",
"BigBirdPegasusModel",
"BigBirdPegasusPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 575 | 1 |
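# A simplified sketch (assumed names, not the real transformers _LazyModule
# implementation) of the lazy-import pattern used in the __init__ above:
# nothing heavy is imported at package-import time, and a submodule is loaded
# only when one of its attributes is first accessed. Using it for real
# requires the surrounding package to actually exist on disk.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported name to the submodule that defines it
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }
        self.__all__ = list(self._class_to_module)

    def __getattr__(self, name):
        if name not in self._class_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        submodule = importlib.import_module("." + self._class_to_module[name], self.__name__)
        return getattr(submodule, name)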
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 40 |
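# A hedged usage sketch for the `converters` behaviour exercised by the last
# test above: outside pytest, the same Csv builder can parse a whitespace-
# separated column into Python lists before Arrow conversion. The file path
# here is hypothetical, and _generate_tables is a private, lazy generator.
import pyarrow as pa
from datasets.packaged_modules.csv.csv import Csv

builder = Csv(
    encoding="utf-8",
    sep=",",
    converters={"int_list": lambda x: [int(i) for i in x.split()]},
)
tables = builder._generate_tables([["path/to/csv_with_int_list.csv"]])
table = pa.concat_tables([t for _, t in tables])  # file is read here
print(table.to_pydict()["int_list"])  # e.g. [[1, 2, 3], [4, 5, 6], [7, 8, 9]]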
from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6_3_7_8_1_3_7.0
AXIS_B = 6_3_5_6_7_5_2.3_1_4_2_4_5
RADIUS = 637_8137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Calculate the great-circle distance (in metres) between two points on Earth,
    given their latitudes and longitudes in degrees.
    """
    # Constants per WGS84: https://en.wikipedia.org/wiki/World_Geodetic_System
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_2) * cos(phi_1) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 593 | 0 |
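# Example usage of haversine_distance above. The coordinates are San Francisco
# and Yosemite Valley; the printed figure (~254 km) is an approximation for
# illustration, not an authoritative geodesic value.
distance_m = haversine_distance(37.774856, -122.424227, 37.864742, -119.537521)
print(f"{distance_m / 1000:.1f} km")  # roughly 254 km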
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class __UpperCamelCase ( lowercase ):
SCREAMING_SNAKE_CASE__ = ['image_processor']
SCREAMING_SNAKE_CASE__ = 'SamImageProcessor'
def __init__( self : Union[str, Any] , lowerCAmelCase : Tuple ):
'''simple docstring'''
super().__init__(lowerCAmelCase )
UpperCAmelCase_ = self.image_processor
UpperCAmelCase_ = -10
UpperCAmelCase_ = self.image_processor.size["longest_edge"]
def __call__( self : List[Any] , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Dict=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[Union[str, TensorType]] = None , **lowerCAmelCase : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase_ = self.image_processor(
lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
# pop arguments that are not used in the foward but used nevertheless
UpperCAmelCase_ = encoding_image_processor["original_sizes"]
if hasattr(lowercase_ , "numpy" ): # Checks if Torch or TF tensor
UpperCAmelCase_ = original_sizes.numpy()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self._check_and_preprocess_points(
input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , )
UpperCAmelCase_ = self._normalize_and_convert(
lowercase_ , lowercase_ , input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , return_tensors=lowercase_ , )
return encoding_image_processor
def __A ( self : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : int=None , lowerCAmelCase : List[Any]="pt" , ):
'''simple docstring'''
if input_points is not None:
if len(lowercase_ ) != len(lowercase_ ):
UpperCAmelCase_ = [
self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] ) for point in input_points
]
else:
UpperCAmelCase_ = [
self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ )
for point, original_size in zip(lowercase_ , lowercase_ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
UpperCAmelCase_ , UpperCAmelCase_ = self._pad_points_and_labels(lowercase_ , lowercase_ )
UpperCAmelCase_ = np.array(lowercase_ )
if input_labels is not None:
UpperCAmelCase_ = np.array(lowercase_ )
if input_boxes is not None:
if len(lowercase_ ) != len(lowercase_ ):
UpperCAmelCase_ = [
self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] , is_bounding_box=lowercase_ )
for box in input_boxes
]
else:
UpperCAmelCase_ = [
self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ , is_bounding_box=lowercase_ )
for box, original_size in zip(lowercase_ , lowercase_ )
]
UpperCAmelCase_ = np.array(lowercase_ )
if input_boxes is not None:
if return_tensors == "pt":
UpperCAmelCase_ = torch.from_numpy(lowercase_ )
# boxes batch size of 1 by default
UpperCAmelCase_ = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
UpperCAmelCase_ = tf.convert_to_tensor(lowercase_ )
# boxes batch size of 1 by default
UpperCAmelCase_ = tf.expand_dims(lowercase_ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"input_boxes": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
UpperCAmelCase_ = torch.from_numpy(lowercase_ )
# point batch size of 1 by default
UpperCAmelCase_ = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
UpperCAmelCase_ = tf.convert_to_tensor(lowercase_ )
# point batch size of 1 by default
UpperCAmelCase_ = tf.expand_dims(lowercase_ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"input_points": input_points} )
if input_labels is not None:
if return_tensors == "pt":
UpperCAmelCase_ = torch.from_numpy(lowercase_ )
# point batch size of 1 by default
UpperCAmelCase_ = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
UpperCAmelCase_ = tf.convert_to_tensor(lowercase_ )
# point batch size of 1 by default
UpperCAmelCase_ = tf.expand_dims(lowercase_ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"input_labels": input_labels} )
return encoding_image_processor
def __A ( self : Tuple , lowerCAmelCase : int , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase_ = max([point.shape[0] for point in input_points] )
UpperCAmelCase_ = []
for i, point in enumerate(lowercase_ ):
if point.shape[0] != expected_nb_points:
UpperCAmelCase_ = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
UpperCAmelCase_ = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(lowercase_ )
UpperCAmelCase_ = processed_input_points
return input_points, input_labels
def __A ( self : Tuple , lowerCAmelCase : int , lowerCAmelCase : np.ndarray , lowerCAmelCase : str , lowerCAmelCase : str=False ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = original_size
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor._get_preprocess_shape(lowercase_ , longest_edge=lowercase_ )
UpperCAmelCase_ = deepcopy(lowercase_ ).astype(lowercase_ )
if is_bounding_box:
UpperCAmelCase_ = coords.reshape(-1 , 2 , 2 )
UpperCAmelCase_ = coords[..., 0] * (new_w / old_w)
UpperCAmelCase_ = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
UpperCAmelCase_ = coords.reshape(-1 , 4 )
return coords
def __A ( self : Optional[int] , lowerCAmelCase : List[str]=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Union[str, Any]=None , ):
'''simple docstring'''
if input_points is not None:
if hasattr(lowercase_ , "numpy" ): # Checks for TF or Torch tensor
UpperCAmelCase_ = input_points.numpy().tolist()
if not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_points[0] , lowercase_ ):
raise ValueError("Input points must be a list of list of floating points." )
UpperCAmelCase_ = [np.array(lowercase_ ) for input_point in input_points]
else:
UpperCAmelCase_ = None
if input_labels is not None:
if hasattr(lowercase_ , "numpy" ):
UpperCAmelCase_ = input_labels.numpy().tolist()
if not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_labels[0] , lowercase_ ):
raise ValueError("Input labels must be a list of list integers." )
UpperCAmelCase_ = [np.array(lowercase_ ) for label in input_labels]
else:
UpperCAmelCase_ = None
if input_boxes is not None:
if hasattr(lowercase_ , "numpy" ):
UpperCAmelCase_ = input_boxes.numpy().tolist()
if (
not isinstance(lowercase_ , lowercase_ )
or not isinstance(input_boxes[0] , lowercase_ )
or not isinstance(input_boxes[0][0] , lowercase_ )
):
raise ValueError("Input boxes must be a list of list of list of floating points." )
UpperCAmelCase_ = [np.array(lowercase_ ).astype(np.floataa ) for box in input_boxes]
else:
UpperCAmelCase_ = None
return input_points, input_labels, input_boxes
@property
def __A ( self : int ):
'''simple docstring'''
UpperCAmelCase_ = self.image_processor.model_input_names
return list(dict.fromkeys(lowercase_ ) )
def __A ( self : Dict , *args : Optional[int] , **kwargs : Any ):
'''simple docstring'''
return self.image_processor.post_process_masks(*args , **kwargs )
| 716 |
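# A minimal NumPy sketch (assumed helper name) of the rescaling performed by
# _normalize_coordinates above: point prompts given in original-image pixel
# space are mapped into the resized space whose longest edge is 1024, matching
# SAM's preprocessing.
import numpy as np


def rescale_coords(coords: np.ndarray, original_size, longest_edge=1024):
    old_h, old_w = original_size
    scale = longest_edge / max(old_h, old_w)
    new_h, new_w = int(old_h * scale + 0.5), int(old_w * scale + 0.5)
    out = coords.astype(np.float64)
    out[..., 0] *= new_w / old_w  # x coordinates
    out[..., 1] *= new_h / old_h  # y coordinates
    return out


points = np.array([[500.0, 375.0]])
print(rescale_coords(points, original_size=(1200, 1800)))  # ~[[284.4, 213.4]]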
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
_a: Optional[int] = """CompVis/stable-diffusion-v1-1"""
_a: Dict = """CompVis/stable-diffusion-v1-2"""
_a: Optional[Any] = """CompVis/stable-diffusion-v1-3"""
_a: str = """CompVis/stable-diffusion-v1-4"""
class __UpperCamelCase ( lowercase ):
def __init__( self : Union[str, Any] , lowerCAmelCase : AutoencoderKL , lowerCAmelCase : CLIPTextModel , lowerCAmelCase : CLIPTokenizer , lowerCAmelCase : UNetaDConditionModel , lowerCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCAmelCase : StableDiffusionSafetyChecker , lowerCAmelCase : CLIPImageProcessor , lowerCAmelCase : bool = True , ):
'''simple docstring'''
super().__init__()
UpperCAmelCase_ = StableDiffusionPipeline.from_pretrained(lowerCAmelCase )
UpperCAmelCase_ = StableDiffusionPipeline.from_pretrained(lowerCAmelCase )
UpperCAmelCase_ = StableDiffusionPipeline.from_pretrained(lowerCAmelCase )
UpperCAmelCase_ = StableDiffusionPipeline(
vae=lowerCAmelCase , text_encoder=lowerCAmelCase , tokenizer=lowerCAmelCase , unet=lowerCAmelCase , scheduler=lowerCAmelCase , safety_checker=lowerCAmelCase , feature_extractor=lowerCAmelCase , requires_safety_checker=lowerCAmelCase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def __A ( self : Tuple ):
'''simple docstring'''
return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith("_" )}
def __A ( self : Dict , lowerCAmelCase : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCAmelCase )
def __A ( self : int ):
'''simple docstring'''
self.enable_attention_slicing(None )
@torch.no_grad()
def __A ( self : Union[str, Any] , lowerCAmelCase : Union[str, List[str]] , lowerCAmelCase : int = 512 , lowerCAmelCase : int = 512 , lowerCAmelCase : int = 50 , lowerCAmelCase : float = 7.5 , lowerCAmelCase : Optional[Union[str, List[str]]] = None , lowerCAmelCase : Optional[int] = 1 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : Optional[torch.Generator] = None , lowerCAmelCase : Optional[torch.FloatTensor] = None , lowerCAmelCase : Optional[str] = "pil" , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase : int = 1 , **lowerCAmelCase : Tuple , ):
'''simple docstring'''
return self.pipea(
prompt=lowerCAmelCase , height=lowerCAmelCase , width=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , output_type=lowerCAmelCase , return_dict=lowerCAmelCase , callback=lowerCAmelCase , callback_steps=lowerCAmelCase , **lowerCAmelCase , )
@torch.no_grad()
def __A ( self : Dict , lowerCAmelCase : Union[str, List[str]] , lowerCAmelCase : int = 512 , lowerCAmelCase : int = 512 , lowerCAmelCase : int = 50 , lowerCAmelCase : float = 7.5 , lowerCAmelCase : Optional[Union[str, List[str]]] = None , lowerCAmelCase : Optional[int] = 1 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : Optional[torch.Generator] = None , lowerCAmelCase : Optional[torch.FloatTensor] = None , lowerCAmelCase : Optional[str] = "pil" , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase : int = 1 , **lowerCAmelCase : List[Any] , ):
'''simple docstring'''
return self.pipea(
prompt=lowerCAmelCase , height=lowerCAmelCase , width=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , output_type=lowerCAmelCase , return_dict=lowerCAmelCase , callback=lowerCAmelCase , callback_steps=lowerCAmelCase , **lowerCAmelCase , )
@torch.no_grad()
def __A ( self : List[str] , lowerCAmelCase : Union[str, List[str]] , lowerCAmelCase : int = 512 , lowerCAmelCase : int = 512 , lowerCAmelCase : int = 50 , lowerCAmelCase : float = 7.5 , lowerCAmelCase : Optional[Union[str, List[str]]] = None , lowerCAmelCase : Optional[int] = 1 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : Optional[torch.Generator] = None , lowerCAmelCase : Optional[torch.FloatTensor] = None , lowerCAmelCase : Optional[str] = "pil" , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase : int = 1 , **lowerCAmelCase : Union[str, Any] , ):
'''simple docstring'''
return self.pipea(
prompt=lowerCAmelCase , height=lowerCAmelCase , width=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , output_type=lowerCAmelCase , return_dict=lowerCAmelCase , callback=lowerCAmelCase , callback_steps=lowerCAmelCase , **lowerCAmelCase , )
@torch.no_grad()
def __A ( self : Dict , lowerCAmelCase : Union[str, List[str]] , lowerCAmelCase : int = 512 , lowerCAmelCase : int = 512 , lowerCAmelCase : int = 50 , lowerCAmelCase : float = 7.5 , lowerCAmelCase : Optional[Union[str, List[str]]] = None , lowerCAmelCase : Optional[int] = 1 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : Optional[torch.Generator] = None , lowerCAmelCase : Optional[torch.FloatTensor] = None , lowerCAmelCase : Optional[str] = "pil" , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase : int = 1 , **lowerCAmelCase : Optional[Any] , ):
'''simple docstring'''
return self.pipea(
prompt=lowerCAmelCase , height=lowerCAmelCase , width=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , output_type=lowerCAmelCase , return_dict=lowerCAmelCase , callback=lowerCAmelCase , callback_steps=lowerCAmelCase , **lowerCAmelCase , )
@torch.no_grad()
def __A ( self : Union[str, Any] , lowerCAmelCase : Union[str, List[str]] , lowerCAmelCase : int = 512 , lowerCAmelCase : int = 512 , lowerCAmelCase : int = 50 , lowerCAmelCase : float = 7.5 , lowerCAmelCase : Optional[Union[str, List[str]]] = None , lowerCAmelCase : Optional[int] = 1 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : Optional[torch.Generator] = None , lowerCAmelCase : Optional[torch.FloatTensor] = None , lowerCAmelCase : Optional[str] = "pil" , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase : int = 1 , **lowerCAmelCase : Union[str, Any] , ):
'''simple docstring'''
UpperCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
self.to(lowerCAmelCase )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` must be divisible by 8 but are {height} and {width}." )
# Get first result from Stable Diffusion Checkpoint v1.1
UpperCAmelCase_ = self.textaimg_sda_a(
prompt=lowerCAmelCase , height=lowerCAmelCase , width=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , output_type=lowerCAmelCase , return_dict=lowerCAmelCase , callback=lowerCAmelCase , callback_steps=lowerCAmelCase , **lowerCAmelCase , )
# Get first result from Stable Diffusion Checkpoint v1.2
UpperCAmelCase_ = self.textaimg_sda_a(
prompt=lowerCAmelCase , height=lowerCAmelCase , width=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , output_type=lowerCAmelCase , return_dict=lowerCAmelCase , callback=lowerCAmelCase , callback_steps=lowerCAmelCase , **lowerCAmelCase , )
# Get first result from Stable Diffusion Checkpoint v1.3
UpperCAmelCase_ = self.textaimg_sda_a(
prompt=lowerCAmelCase , height=lowerCAmelCase , width=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , output_type=lowerCAmelCase , return_dict=lowerCAmelCase , callback=lowerCAmelCase , callback_steps=lowerCAmelCase , **lowerCAmelCase , )
# Get first result from Stable Diffusion Checkpoint v1.4
UpperCAmelCase_ = self.textaimg_sda_a(
prompt=lowerCAmelCase , height=lowerCAmelCase , width=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , output_type=lowerCAmelCase , return_dict=lowerCAmelCase , callback=lowerCAmelCase , callback_steps=lowerCAmelCase , **lowerCAmelCase , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 268 | 0 |
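# A hedged usage sketch for the four-checkpoint comparison pipeline defined
# above. The custom-pipeline id "stable_diffusion_comparison" refers to the
# diffusers community pipeline this class mirrors (an assumption worth
# verifying against your diffusers version); all four v1.x checkpoints are
# downloaded on first use, and a CUDA device is assumed for reasonable speed.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="stable_diffusion_comparison",
    torch_dtype=torch.float16,
)
pipe.enable_attention_slicing()
pipe = pipe.to("cuda")

result = pipe(prompt="an astronaut riding a horse on mars", num_inference_steps=25)
# result.images holds one image per checkpoint, v1.1 through v1.4, in order.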
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
def __init__( self , UpperCamelCase__ , UpperCamelCase__ = 13 , UpperCamelCase__ = 64 , UpperCamelCase__ = 2 , UpperCamelCase__ = 3 , UpperCamelCase__ = 3 , UpperCamelCase__ = True , UpperCamelCase__ = True , UpperCamelCase__ = 1_28 , UpperCamelCase__=[16, 32, 64, 1_28] , UpperCamelCase__ = 7 , UpperCamelCase__ = 4 , UpperCamelCase__ = 37 , UpperCamelCase__ = "gelu" , UpperCamelCase__ = 0.1 , UpperCamelCase__ = 0.1 , UpperCamelCase__ = 10 , UpperCamelCase__ = 0.02 , UpperCamelCase__ = 2 , UpperCamelCase__ = 1 , UpperCamelCase__ = 1_28 , UpperCamelCase__ = [2, 2, 2, 2] , UpperCamelCase__ = 2 , UpperCamelCase__ = 2 , ):
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = is_training
A__ = use_labels
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = type_sequence_label_size
A__ = initializer_range
A__ = encoder_stride
A__ = num_attention_outputs
A__ = embed_dim
A__ = embed_dim + 1
A__ = resolution
A__ = depths
A__ = hidden_sizes
A__ = dim
A__ = mlp_expansion_ratio
def lowercase_ ( self ):
'''simple docstring'''
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self ):
'''simple docstring'''
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = TFEfficientFormerModel(config=UpperCamelCase__ )
A__ = model(UpperCamelCase__ , training=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = self.type_sequence_label_size
A__ = TFEfficientFormerForImageClassification(UpperCamelCase__ )
A__ = model(UpperCamelCase__ , labels=UpperCamelCase__ , training=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A__ = 1
A__ = TFEfficientFormerForImageClassification(UpperCamelCase__ )
A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A__ = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase_ ( self ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
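# A hedged end-to-end sketch for the model family tested in this file,
# mirroring the image-classification path. The checkpoint id is the published
# snap-research/efficientformer-l1-300; from_pt=True may be needed if only
# PyTorch weights are hosted, and the image path below is hypothetical.
import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFEfficientFormerForImageClassification

processor = AutoImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
model = TFEfficientFormerForImageClassification.from_pretrained(
    "snap-research/efficientformer-l1-300"
)

image = Image.open("path/to/image.jpg")  # hypothetical local image
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs, training=False).logits
print(int(tf.argmax(logits, axis=-1)[0]))  # predicted ImageNet class index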
@require_tf
class TFEfficientFormerModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowercase__ : str = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
lowercase__ : str = (
{
"""feature-extraction""": TFEfficientFormerModel,
"""image-classification""": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
lowercase__ : Union[str, Any] = False
lowercase__ : str = False
lowercase__ : List[str] = False
lowercase__ : Union[str, Any] = False
lowercase__ : Dict = False
def lowercase_ ( self ):
'''simple docstring'''
A__ = TFEfficientFormerModelTester(self )
A__ = ConfigTester(
self , config_class=EfficientFormerConfig , has_text_modality=False , hidden_size=37 )
def lowercase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
def lowercase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
def lowercase_ ( self ):
'''simple docstring'''
pass
def lowercase_ ( self ):
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCamelCase__ )
A__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowercase_ ( self ):
'''simple docstring'''
def check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ = model_class(UpperCamelCase__ )
A__ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) , training=UpperCamelCase__ )
A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A__ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
if hasattr(self.model_tester , "encoder_seq_length" ):
A__ = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1:
A__ = seq_length * self.model_tester.chunk_length
else:
A__ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
A__ = outputs.decoder_hidden_states
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
A__ = getattr(self.model_tester , "seq_length" , UpperCamelCase__ )
A__ = getattr(self.model_tester , "decoder_seq_length" , UpperCamelCase__ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
'''simple docstring'''
A__ = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ )
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def lowercase_ ( self ):
'''simple docstring'''
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFEfficientFormerModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def lowercase_ ( self ):
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
A__ = getattr(self.model_tester , "seq_length" , UpperCamelCase__ )
A__ = getattr(self.model_tester , "encoder_seq_length" , UpperCamelCase__ )
A__ = getattr(self.model_tester , "key_length" , UpperCamelCase__ )
A__ = getattr(self.model_tester , "chunk_length" , UpperCamelCase__ )
if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
A__ = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
A__ = True
A__ = False
A__ = True
A__ = model_class(UpperCamelCase__ )
A__ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) , training=UpperCamelCase__ )
A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ = True
A__ = model_class(UpperCamelCase__ )
A__ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) , training=UpperCamelCase__ )
A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowercase_ ( self ):
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
A__ = model_class(UpperCamelCase__ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
A__ = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=UpperCamelCase__ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
A__ = model(UpperCamelCase__ )
self.assertTrue(outputs_dict is not None )
def __a ( ) -> Tuple:
'''simple docstring'''
A__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowercase_ ( self ):
'''simple docstring'''
return (
EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" )
if is_vision_available()
else None
)
@slow
def lowercase_ ( self ):
'''simple docstring'''
A__ = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=UpperCamelCase__ , return_tensors="tf" )
# forward pass
A__ = model(**UpperCamelCase__ , training=UpperCamelCase__ )
# verify the logits
A__ = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A__ = tf.constant([-0.0555, 0.4825, -0.0852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
def lowercase_ ( self ):
'''simple docstring'''
A__ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"snap-research/efficientformer-l1-300" )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=UpperCamelCase__ , return_tensors="tf" )
# forward pass
A__ = model(**UpperCamelCase__ , training=UpperCamelCase__ )
# verify the logits
A__ = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A__ = tf.constant([-0.1312, 0.4353, -1.0499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) ) | 337 |
"""simple docstring"""
def __a ( A = 10 ) -> str:
'''simple docstring'''
if not isinstance(A , int ) or A < 0:
raise ValueError("Invalid input" )
modulus = 10**A
number = 28_433 * (pow(2 , 7_830_457 , modulus )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(10) = }''') | 337 | 1 |
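# A minimal illustration of the trick used above: three-argument pow() performs
# modular exponentiation without ever materialising the full power, so only the
# requested trailing digits are held in memory.
#
#     >>> pow(2, 10, 1000)   # last three digits of 2**10 == 1024
#     24
#
# The function applies the same idea with modulus 10**A to return the last A
# digits of 28433 * 2**7830457 + 1.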
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Dict = ArgumentParser(
description=(
'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
) )
# Optional arguments for the launch helper
parser.add_argument('--num_cores' , type=__a , default=1 , help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
'training_script' , type=__a , help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
) , )
# rest from the training program
parser.add_argument('training_script_args' , nargs=__a )
return parser.parse_args()
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = parse_args()
# Import training_script as a module.
_a : str = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_a : int = script_fpath.stem
_a : List[str] = importlib.import_module(__a )
# Patch sys.argv
_a : Union[str, Any] = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 319 |
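# Hedged usage sketch (the training-script name and its flags below are
# illustrative; only --num_cores and the positional training_script come from
# the parser defined above):
#
#     python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased
#
# The launcher imports the script as a module, appends --tpu_num_cores to its
# sys.argv, and xmp.spawn() runs its _mp_fn once per requested TPU core.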
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 319 | 1 |
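# A note on the guard above, with a hypothetical downstream import: when torch
# or transformers is missing, the wildcard import pulls in dummy stand-ins, so
#
#     from diffusers import StableDiffusionControlNetPipeline
#
# still succeeds at import time and only raises a descriptive error naming the
# missing backend once the class is actually used.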
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
__lowerCAmelCase =logging.getLogger(__name__)
@dataclass
class _snake_case :
"""simple docstring"""
_UpperCamelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
_UpperCamelCase = field(
default=snake_case , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_UpperCamelCase = field(
default=snake_case , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
_UpperCamelCase = field(
default=snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
_UpperCamelCase = field(
default=snake_case , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
_UpperCamelCase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
_UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class _snake_case :
"""simple docstring"""
_UpperCamelCase = field(default=snake_case , metadata={"help": "The input training data file (a text file)."} )
_UpperCamelCase = field(
default=snake_case , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
_UpperCamelCase = field(
default=snake_case , metadata={"help": "Overwrite the cached training and evaluation sets"} )
_UpperCamelCase = field(
default=snake_case , metadata={"help": "The number of processes to use for the preprocessing."} , )
_UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
_UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
_UpperCamelCase = field(
default=snake_case , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
if self.train_file is not None:
a_ = self.train_file.split('.' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
a_ = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class _snake_case :
"""simple docstring"""
_UpperCamelCase = 42
_UpperCamelCase = True
_UpperCamelCase = None
_UpperCamelCase = None
def __call__( self , UpperCAmelCase__ ) -> Optional[Any]:
a_ = 'label' if 'label' in features[0].keys() else 'labels'
a_ = [feature.pop(UpperCAmelCase__ ) for feature in features]
a_ = len(UpperCAmelCase__ )
a_ = len(features[0]['input_ids'] )
a_ = [
[{k: v[i] for k, v in feature.items()} for i in range(UpperCAmelCase__ )] for feature in features
]
a_ = list(chain(*UpperCAmelCase__ ) )
a_ = self.tokenizer.pad(
UpperCAmelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
# Un-flatten
a_ = {k: v.view(UpperCAmelCase__ , UpperCAmelCase__ , -1 ) for k, v in batch.items()}
# Add back labels
a_ = torch.tensor(UpperCAmelCase__ , dtype=torch.intaa )
return batch
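# Shape sketch for the collator above (assuming batch size B, 4 candidate
# endings, and sequence length L): features are flattened to 4*B rows before
# tokenizer.pad, then viewed back so that
#     batch["input_ids"].shape == (B, 4, L)
# and the popped labels are re-attached as a (B,)-shaped tensor.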
def a ( ) -> Tuple:
"""simple docstring"""
a_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a_ , a_ , a_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a_ , a_ , a_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , _UpperCAmelCase , _UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
a_ = training_args.get_process_log_level()
logger.setLevel(_UpperCAmelCase )
datasets.utils.logging.set_verbosity(_UpperCAmelCase )
transformers.utils.logging.set_verbosity(_UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
a_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
a_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
a_ = {}
if data_args.train_file is not None:
a_ = data_args.train_file
if data_args.validation_file is not None:
a_ = data_args.validation_file
a_ = data_args.train_file.split('.' )[-1]
a_ = load_dataset(
_UpperCAmelCase , data_files=_UpperCAmelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
a_ = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
a_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
a_ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
a_ = [F'''ending{i}''' for i in range(4 )]
a_ = 'sent1'
a_ = 'sent2'
if data_args.max_seq_length is None:
a_ = tokenizer.model_max_length
if max_seq_length > 1_0_2_4:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
a_ = 1_0_2_4
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
a_ = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(_UpperCAmelCase ):
a_ = [[context] * 4 for context in examples[context_name]]
a_ = examples[question_header_name]
a_ = [
[F'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(_UpperCAmelCase )
]
# Flatten out
a_ = list(chain(*_UpperCAmelCase ) )
a_ = list(chain(*_UpperCAmelCase ) )
# Tokenize
a_ = tokenizer(
_UpperCAmelCase , _UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(_UpperCAmelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
a_ = raw_datasets['train']
if data_args.max_train_samples is not None:
a_ = min(len(_UpperCAmelCase ) , data_args.max_train_samples )
a_ = train_dataset.select(range(_UpperCAmelCase ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
a_ = train_dataset.map(
_UpperCAmelCase , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
a_ = raw_datasets['validation']
if data_args.max_eval_samples is not None:
a_ = min(len(_UpperCAmelCase ) , data_args.max_eval_samples )
a_ = eval_dataset.select(range(_UpperCAmelCase ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
a_ = eval_dataset.map(
_UpperCAmelCase , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
a_ = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=_UpperCAmelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(_UpperCAmelCase ):
a_ , a_ = eval_predictions
a_ = np.argmax(_UpperCAmelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
a_ = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , compute_metrics=_UpperCAmelCase , )
# Training
if training_args.do_train:
a_ = None
if training_args.resume_from_checkpoint is not None:
a_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
a_ = last_checkpoint
a_ = trainer.train(resume_from_checkpoint=_UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
a_ = train_result.metrics
a_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCAmelCase )
)
a_ = min(_UpperCAmelCase , len(_UpperCAmelCase ) )
trainer.log_metrics('train' , _UpperCAmelCase )
trainer.save_metrics('train' , _UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
a_ = trainer.evaluate()
a_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_UpperCAmelCase )
a_ = min(_UpperCAmelCase , len(_UpperCAmelCase ) )
trainer.log_metrics('eval' , _UpperCAmelCase )
trainer.save_metrics('eval' , _UpperCAmelCase )
a_ = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCAmelCase )
else:
trainer.create_model_card(**_UpperCAmelCase )
def a ( _UpperCAmelCase ) -> List[Any]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 697 |
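# Hedged command-line sketch (model name and paths are illustrative; every flag
# maps to a dataclass field or TrainingArguments option declared above):
#
#     python run_swag.py \
#         --model_name_or_path bert-base-uncased \
#         --do_train --do_eval \
#         --max_seq_length 128 \
#         --per_device_train_batch_size 16 \
#         --output_dir /tmp/swag_out
#
# Passing a single .json file path instead triggers the parse_json_file()
# branch at the top of main().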
'''simple docstring'''
import unittest
from transformers import DonutProcessor
__lowerCAmelCase ="naver-clova-ix/donut-base"
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = DonutProcessor.from_pretrained(UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
a_ = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
a_ = self.processor.tokenajson(UpperCAmelCase__ )
self.assertDictEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 697 | 1 |
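# Behaviour note for the test above: the token2json-style method inverts
# Donut's XML-like tags, e.g. "<s_age>99</s_age>" becomes {"age": "99"}, and
# <sep/>-delimited groups such as the <s_nicknames> block come back as a list
# of dicts, which is exactly what the assertDictEqual verifies.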
'''simple docstring'''
from __future__ import annotations
A = 10
def UpperCAmelCase ( UpperCAmelCase__ : list[int]):
lowerCamelCase : List[str] = 1
lowerCamelCase : Optional[Any] = max(UpperCAmelCase__)
while placement <= max_digit:
# declare and initialize empty buckets
lowerCamelCase : list[list] = [[] for _ in range(UpperCAmelCase__)]
# split list_of_ints between the buckets
for i in list_of_ints:
lowerCamelCase : Union[str, Any] = int((i / placement) % RADIX)
buckets[tmp].append(UpperCAmelCase__)
# put each buckets' contents into list_of_ints
lowerCamelCase : List[str] = 0
for b in range(UpperCAmelCase__):
for i in buckets[b]:
lowerCamelCase : str = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716 |
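# Worked example for the LSD radix sort above (assumes a non-empty list of
# non-negative ints; with RADIX == 10 each pass buckets by one decimal digit):
#
#     [170, 45, 75, 90, 802, 24, 2, 66]
#     pass 1 (1s digit)   -> [170, 90, 802, 2, 24, 45, 75, 66]
#     pass 2 (10s digit)  -> [802, 2, 24, 45, 66, 170, 75, 90]
#     pass 3 (100s digit) -> [2, 24, 45, 66, 75, 90, 170, 802]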
'''simple docstring'''
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __snake_case ( a__ , unittest.TestCase):
_lowerCAmelCase = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def UpperCAmelCase_ ( self, A=0 ):
"""simple docstring"""
lowerCamelCase : List[Any] = floats_tensor((1, 3, 128, 128), rng=random.Random(A ) )
lowerCamelCase : Tuple = np.random.RandomState(A )
lowerCamelCase : str = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase : Union[str, Any] = self.get_dummy_inputs()
lowerCamelCase : List[Any] = pipe(**A ).images
lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
lowerCamelCase : Union[str, Any] = np.array([0.6_9643, 0.5_8484, 0.5_0314, 0.5_8760, 0.5_5368, 0.5_9643, 0.5_1529, 0.4_1217, 0.4_9087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider' )
lowerCamelCase : Any = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=A )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase : Dict = self.get_dummy_inputs()
lowerCamelCase : Dict = pipe(**A ).images
lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowerCamelCase : Union[str, Any] = np.array([0.6_1737, 0.5_4642, 0.5_3183, 0.5_4465, 0.5_2742, 0.6_0525, 0.4_9969, 0.4_0655, 0.4_8154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider' )
lowerCamelCase : Optional[int] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A )
# warmup pass to apply optimizations
lowerCamelCase : Union[str, Any] = pipe(**self.get_dummy_inputs() )
lowerCamelCase : Any = self.get_dummy_inputs()
lowerCamelCase : Optional[Any] = pipe(**A ).images
lowerCamelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowerCamelCase : Dict = np.array([0.5_2761, 0.5_9977, 0.4_9033, 0.4_9619, 0.5_4282, 0.5_0311, 0.4_7600, 0.4_0918, 0.4_5203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider' )
lowerCamelCase : int = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase : str = self.get_dummy_inputs()
lowerCamelCase : List[Any] = pipe(**A ).images
lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowerCamelCase : List[str] = np.array([0.5_2911, 0.6_0004, 0.4_9229, 0.4_9805, 0.5_4502, 0.5_0680, 0.4_7777, 0.4_1028, 0.4_5304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider' )
lowerCamelCase : Union[str, Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase : Union[str, Any] = self.get_dummy_inputs()
lowerCamelCase : List[Any] = pipe(**A ).images
lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowerCamelCase : Optional[int] = np.array([0.5_2911, 0.6_0004, 0.4_9229, 0.4_9805, 0.5_4502, 0.5_0680, 0.4_7777, 0.4_1028, 0.4_5304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider' )
lowerCamelCase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase : str = self.get_dummy_inputs()
lowerCamelCase : Tuple = pipe(**A ).images
lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowerCamelCase : List[Any] = np.array([0.6_5331, 0.5_8277, 0.4_8204, 0.5_6059, 0.5_3665, 0.5_6235, 0.5_0969, 0.4_0009, 0.4_6552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __snake_case ( unittest.TestCase):
@property
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Optional[int] = ort.SessionOptions()
lowerCamelCase : Union[str, Any] = False
return options
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
lowerCamelCase : int = init_image.resize((768, 512) )
# using the PNDM scheduler by default
lowerCamelCase : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=A, feature_extractor=A, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase : Optional[Any] = 'A fantasy landscape, trending on artstation'
lowerCamelCase : Dict = np.random.RandomState(0 )
lowerCamelCase : Any = pipe(
prompt=A, image=A, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=A, output_type='np', )
lowerCamelCase : Union[str, Any] = output.images
lowerCamelCase : Union[str, Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
lowerCamelCase : List[Any] = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
lowerCamelCase : Any = init_image.resize((768, 512) )
lowerCamelCase : List[Any] = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx' )
lowerCamelCase : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=A, safety_checker=A, feature_extractor=A, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase : Union[str, Any] = 'A fantasy landscape, trending on artstation'
lowerCamelCase : List[Any] = np.random.RandomState(0 )
lowerCamelCase : Tuple = pipe(
prompt=A, image=A, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=A, output_type='np', )
lowerCamelCase : List[Any] = output.images
lowerCamelCase : Union[str, Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
lowerCamelCase : int = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 449 | 0 |
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 88 |
"""simple docstring"""
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class __UpperCamelCase :
def __init__( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : int ) -> Tuple:
if dst_width <= 0 or dst_height <= 0:
raise ValueError('Destination width/height should be > 0' )
lowerCAmelCase :List[Any] = img
lowerCAmelCase :Optional[int] = img.shape[1]
lowerCAmelCase :Union[str, Any] = img.shape[0]
lowerCAmelCase :Optional[int] = dst_width
lowerCAmelCase :Optional[int] = dst_height
lowerCAmelCase :Any = self.src_w / self.dst_w
lowerCAmelCase :Dict = self.src_h / self.dst_h
lowerCAmelCase :int = (
np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 255
)
def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[str]:
for i in range(self.dst_h ):
for j in range(self.dst_w ):
lowerCAmelCase :Optional[Any] = self.img[self.get_y(UpperCAmelCase )][self.get_x(UpperCAmelCase )]
def UpperCAmelCase__ ( self : Optional[int] , UpperCAmelCase : int ) -> int:
return int(self.ratio_x * x )
def UpperCAmelCase__ ( self : List[str] , UpperCAmelCase : int ) -> int:
return int(self.ratio_y * y )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE = 8_00, 6_00
__SCREAMING_SNAKE_CASE = imread('image_data/lena.jpg', 1)
__SCREAMING_SNAKE_CASE = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output
)
waitKey(0)
destroyAllWindows() | 553 | 0 |
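# Sampling rule used above: destination pixel (i, j) copies source pixel
# (get_y(i), get_x(j)) = (int(src_h/dst_h * i), int(src_w/dst_w * j)),
# i.e. nearest neighbour by truncation. When upscaling, source pixels are
# repeated rather than interpolated, which produces the method's blocky look.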
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
def lowerCAmelCase (self : Optional[Any] ):
__a : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__a : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(snake_case_ )
__a : Union[str, Any] = -1
__a : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(snake_case_ )
__a : Tuple = model.generate(snake_case_ , max_new_tokens=1_0 , do_sample=snake_case_ )
__a : Tuple = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__a : List[str] = TextStreamer(snake_case_ )
model.generate(snake_case_ , max_new_tokens=1_0 , do_sample=snake_case_ , streamer=snake_case_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__a : int = cs.out[:-1]
self.assertEqual(snake_case_ , snake_case_ )
def lowerCAmelCase (self : Any ):
__a : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__a : Tuple = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(snake_case_ )
__a : List[Any] = -1
__a : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(snake_case_ )
__a : List[Any] = model.generate(snake_case_ , max_new_tokens=1_0 , do_sample=snake_case_ )
__a : Tuple = tokenizer.decode(greedy_ids[0] )
__a : Union[str, Any] = TextIteratorStreamer(snake_case_ )
__a : Union[str, Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
__a : Tuple = Thread(target=model.generate , kwargs=snake_case_ )
thread.start()
__a : Optional[int] = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(snake_case_ , snake_case_ )
def lowerCAmelCase (self : Dict ):
__a : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__a : Optional[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(snake_case_ )
__a : Tuple = -1
__a : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(snake_case_ )
__a : Optional[int] = model.generate(snake_case_ , max_new_tokens=1_0 , do_sample=snake_case_ )
__a : str = greedy_ids[:, input_ids.shape[1] :]
__a : Optional[int] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__a : List[Any] = TextStreamer(snake_case_ , skip_prompt=snake_case_ )
model.generate(snake_case_ , max_new_tokens=1_0 , do_sample=snake_case_ , streamer=snake_case_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__a : Dict = cs.out[:-1]
self.assertEqual(snake_case_ , snake_case_ )
def lowerCAmelCase (self : List[Any] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
__a : Optional[int] = AutoTokenizer.from_pretrained('''distilgpt2''' )
__a : int = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(snake_case_ )
__a : Optional[int] = -1
__a : Optional[Any] = torch.ones((1, 5) , device=snake_case_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__a : List[Any] = TextStreamer(snake_case_ , skip_special_tokens=snake_case_ )
model.generate(snake_case_ , max_new_tokens=1 , do_sample=snake_case_ , streamer=snake_case_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__a : Optional[Any] = cs.out[:-1] # Remove the final "\n"
__a : str = tokenizer(snake_case_ , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def lowerCAmelCase (self : Any ):
__a : Union[str, Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__a : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(snake_case_ )
__a : Any = -1
__a : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(snake_case_ )
__a : Optional[Any] = TextIteratorStreamer(snake_case_ , timeout=0.001 )
__a : Union[str, Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
__a : Union[str, Any] = Thread(target=model.generate , kwargs=snake_case_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(snake_case_ ):
__a : Any = ''''''
for new_text in streamer:
streamer_text += new_text
| 326 |
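# Minimal usage sketch of the streaming API exercised above (model name is
# illustrative; the pattern mirrors the TextIteratorStreamer test):

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("distilgpt2")
model = AutoModelForCausalLM.from_pretrained("distilgpt2")
streamer = TextIteratorStreamer(tok, skip_prompt=True)
inputs = tok("Hello", return_tensors="pt")
# generate() runs in a background thread so the main thread can consume chunks
Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer}).start()
for new_text in streamer:  # yields decoded text pieces as they are produced
    print(new_text, end="", flush=True)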
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowercase__ =logging.get_logger(__name__)
class UpperCamelCase__ ( __lowercase ):
_SCREAMING_SNAKE_CASE : int = ["input_features", "attention_mask"]
def __init__(self : Dict , snake_case_ : Tuple=8_0 , snake_case_ : Tuple=1_6_0_0_0 , snake_case_ : Union[str, Any]=8_0 , snake_case_ : List[Any]=0.0 , snake_case_ : Optional[Any]=True , snake_case_ : Any=True , snake_case_ : int=True , **snake_case_ : Dict , ):
super().__init__(feature_size=snake_case_ , sampling_rate=snake_case_ , padding_value=snake_case_ , **snake_case_ )
__a : int = num_mel_bins
__a : Dict = do_ceptral_normalize
__a : Union[str, Any] = normalize_means
__a : int = normalize_vars
__a : Optional[Any] = True
def lowerCAmelCase (self : Any , snake_case_ : np.ndarray , ):
__a : Union[str, Any] = waveform * (2**1_5) # Kaldi compliance: 16-bit signed integers
__a : Any = torch.from_numpy(snake_case_ ).unsqueeze(0 )
__a : List[Any] = ta_kaldi.fbank(snake_case_ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def lowerCAmelCase (snake_case_ : np.ndarray , snake_case_ : int , snake_case_ : Optional[bool] = True , snake_case_ : Optional[bool] = True , snake_case_ : float = 0.0 , ):
# make sure we normalize float32 arrays
if normalize_means:
__a : Optional[int] = x[:input_length].mean(axis=0 )
__a : Optional[int] = np.subtract(snake_case_ , snake_case_ )
if normalize_vars:
__a : Optional[Any] = x[:input_length].std(axis=0 )
__a : Optional[Any] = np.divide(snake_case_ , snake_case_ )
if input_length < x.shape[0]:
__a : Optional[int] = padding_value
# make sure array is in float32
__a : Tuple = x.astype(np.floataa )
return x
def lowerCAmelCase (self : List[Any] , snake_case_ : List[np.ndarray] , snake_case_ : Optional[np.ndarray] = None ):
__a : Optional[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(snake_case_ , snake_case_ , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(snake_case_ , snake_case_ )
]
def __call__(self : List[str] , snake_case_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , snake_case_ : Union[bool, str, PaddingStrategy] = False , snake_case_ : Optional[int] = None , snake_case_ : bool = False , snake_case_ : Optional[int] = None , snake_case_ : Optional[Union[str, TensorType]] = None , snake_case_ : Optional[int] = None , snake_case_ : Optional[bool] = None , **snake_case_ : int , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
__a : Dict = isinstance(snake_case_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
__a : List[str] = is_batched_numpy or (
isinstance(snake_case_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__a : Optional[int] = [np.asarray(snake_case_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(snake_case_ , np.ndarray ):
__a : Optional[int] = np.asarray(snake_case_ , dtype=np.floataa )
elif isinstance(snake_case_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__a : Optional[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__a : Dict = [raw_speech]
# extract fbank features
__a : Union[str, Any] = [self._extract_fbank_features(snake_case_ ) for waveform in raw_speech]
# convert into correct format for padding
__a : str = BatchFeature({'''input_features''': features} )
__a : Union[str, Any] = self.pad(
snake_case_ , padding=snake_case_ , max_length=snake_case_ , truncation=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , **snake_case_ , )
# make sure list is in array format
__a : List[Any] = padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , snake_case_ ):
__a : List[str] = [np.asarray(snake_case_ , dtype=np.floataa ) for feature in input_features]
__a : Tuple = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
__a : Optional[int] = [np.asarray(snake_case_ , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
__a : int = (
np.array(snake_case_ , dtype=np.intaa )
if self._get_padding_strategies(snake_case_ , max_length=snake_case_ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
__a : List[str] = self.normalize(
padded_inputs['''input_features'''] , attention_mask=snake_case_ )
if return_tensors is not None:
__a : Optional[int] = padded_inputs.convert_to_tensors(snake_case_ )
return padded_inputs
| 326 | 1 |
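# Sketch of the utterance-level CMVN performed above (assumes both
# normalize_means and normalize_vars are enabled): statistics are computed over
# the valid frames only, and padded frames are reset to padding_value.

import numpy as np

def cmvn_sketch(x: np.ndarray, input_length: int, padding_value: float = 0.0) -> np.ndarray:
    x = x.astype(np.float32)  # astype returns a fresh array, so x is safe to mutate
    valid = x[:input_length]
    x[:input_length] = (valid - valid.mean(axis=0)) / valid.std(axis=0)
    if input_length < x.shape[0]:
        x[input_length:] = padding_value
    return x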
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
lowerCAmelCase_ = logging.get_logger(__name__)
def __UpperCAmelCase ( ) -> List[Any]:
# Get the sagemaker specific mp parameters from smp_options variable.
lowercase__ : List[Any] = os.getenv('''SM_HP_MP_PARAMETERS''' , '''{}''' )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
lowercase__ : Optional[int] = json.loads(lowercase__ )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
lowercase__ : Tuple = os.getenv('''SM_FRAMEWORK_PARAMS''' , '''{}''' )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
lowercase__ : List[Any] = json.loads(lowercase__ )
if not mpi_options.get('''sagemaker_mpi_enabled''' , lowercase__ ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec('''smdistributed''' ) is not None
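# Illustrative environment payloads for the check above (values hypothetical):
#   SM_HP_MP_PARAMETERS='{"partitions": 2, "microbatches": 4}'
#   SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}'
# Model parallelism is reported available only when "partitions" is present,
# the MPI flag is truthy, and the smdistributed package is importable.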
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class __A ( _UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase : Any = field(
default="" ,metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"} ,)
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
super().__post_init__()
warnings.warn(
'''`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '''
'''`TrainingArguments` instead.''' ,A_ ,)
@cached_property
def UpperCAmelCase ( self : Optional[int] ) -> "torch.device":
"""simple docstring"""
logger.info('''PyTorch: setting up devices''' )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
'''torch.distributed process group is initialized, but local_rank == -1. '''
'''In order to use Torch DDP, launch your script with `python -m torch.distributed.launch''' )
if self.no_cuda:
lowercase__ : List[Any] = torch.device('''cpu''' )
lowercase__ : str = 0
elif is_sagemaker_model_parallel_available():
lowercase__ : Any = smp.local_rank()
lowercase__ : Dict = torch.device('''cuda''' ,A_ )
lowercase__ : Optional[Any] = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend='''smddp''' ,timeout=self.ddp_timeout_delta )
lowercase__ : int = int(os.getenv('''SMDATAPARALLEL_LOCAL_RANK''' ) )
lowercase__ : str = torch.device('''cuda''' ,self.local_rank )
lowercase__ : Tuple = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
lowercase__ : Optional[int] = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
lowercase__ : str = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend='''nccl''' ,timeout=self.ddp_timeout_delta )
lowercase__ : int = torch.device('''cuda''' ,self.local_rank )
lowercase__ : Optional[Any] = 1
if device.type == "cuda":
torch.cuda.set_device(A_ )
return device
@property
def UpperCAmelCase ( self : int ) -> Any:
"""simple docstring"""
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def UpperCAmelCase ( self : Any ) -> int:
"""simple docstring"""
return not is_sagemaker_model_parallel_available()
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return False
| 560 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
_UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    """Two-stage pipeline: CLIPSeg predicts a mask from a free-form text query,
    then a Stable Diffusion inpainting pass fills the masked region."""

    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly, as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly, as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
def UpperCamelCase__ ( self , A_ = "auto" ) ->Tuple:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowerCAmelCase : int = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A_ )
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
self.enable_attention_slicing(A_ )
def UpperCamelCase__ ( self ) ->str:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
__lowerCAmelCase : Union[str, Any] = torch.device('''cuda''' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(A_ , A_ )
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Use CLIPSeg to predict a segmentation mask from the free-form `text` query.
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
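# ---------------------------------------------------------------------------
# Illustrative usage (editor's addition, not part of the original community
# pipeline). The checkpoint ids are assumptions: any Stable Diffusion
# inpainting checkpoint plus the CIDAS CLIPSeg weights should work.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from PIL import Image

    pipe = DiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-inpainting",  # assumed checkpoint id
        custom_pipeline="text_inpainting",
        segmentation_model=CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined"),
        segmentation_processor=CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined"),
    ).to("cuda")

    init_image = Image.open("photo.png").convert("RGB").resize((512, 512))  # placeholder path
    # CLIPSeg turns `text` into the mask; `prompt` drives what gets painted in.
    result = pipe(prompt="a red sports car", image=init_image, text="the parked car")
    result.images[0].save("inpainted.png")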
| 492 | 0 |
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    """Wraps a TVLT image processor and feature extractor into a single processor."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, *args, mask_pixel=mask_pixel, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, *args, is_mixed=True, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
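# ---------------------------------------------------------------------------
# Illustrative usage (editor's addition). The checkpoint id and input shapes
# are assumptions; TVLT consumes video frames plus a mono audio waveform.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np

    from transformers import TvltProcessor

    processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")  # assumed checkpoint id
    video = list(np.random.rand(8, 224, 224, 3))  # 8 RGB frames (assumed layout)
    audio = list(np.random.rand(10_000))          # waveform samples
    inputs = processor(images=video, audio=audio, sampling_rate=44_100, return_tensors="pt")
    print(sorted(inputs.keys()))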
| 687 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
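# ---------------------------------------------------------------------------
# Editor's sketch (not in the original file): the lazy-import idea behind
# `_LazyModule`, reduced to plain Python. `_import_structure` maps submodule
# names to their exported symbols; the first attribute access triggers the
# real import and caches the result. This is a simplified stand-in, not the
# actual transformers implementation.
# ---------------------------------------------------------------------------
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # reverse map: exported symbol -> submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr: str):
        module_name = self._symbol_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{module_name}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is hit only once
        return value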
| 687 | 1 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main():
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size):
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name, key_size):
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
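# ---------------------------------------------------------------------------
# Editor's example (not in the original file): what the generated pair is for.
# Textbook RSA with tiny primes -- insecure, purely illustrative:
#   c = m^e mod n  (encrypt with public key),  m = c^d mod n  (decrypt).
# ---------------------------------------------------------------------------
def _demo_textbook_rsa() -> None:
    p, q = 61, 53
    n = p * q                          # 3233
    e = 17                             # coprime with (p - 1) * (q - 1) = 3120
    d = pow(e, -1, (p - 1) * (q - 1))  # modular inverse (Python 3.8+), d = 2753
    message = 65
    ciphertext = pow(message, e, n)    # 2790
    assert pow(ciphertext, d, n) == message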
| 95 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
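# ---------------------------------------------------------------------------
# Illustrative usage (editor's addition). The defaults above implement the
# ConvNeXT evaluation recipe: images with shortest_edge >= 384 are warped
# straight to 384x384, smaller target sizes are resized by 1/crop_pct and
# then center-cropped.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import PIL.Image

    processor = ConvNextImageProcessor()  # shortest_edge=384, crop_pct=224/256
    image = PIL.Image.fromarray((np.random.rand(480, 640, 3) * 255).astype(np.uint8))
    batch = processor.preprocess(image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 384, 384)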
| 402 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 704 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights to the transformers design.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""  # the upstream script hardcodes an empty dict path

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
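# ---------------------------------------------------------------------------
# Editor's note: example invocation of the `__main__` block below (the script
# filename and paths are placeholders):
#
#   python convert_unispeech_sat_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/unispeech_sat.pt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf \
#       --not_finetuned
#
# Omit --not_finetuned (and optionally pass --config_path) when converting a
# CTC fine-tuned checkpoint.
# ---------------------------------------------------------------------------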
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
UpperCAmelCase = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 531 | 0 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors


def reduce_by_error(logs, error_filter=None):
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test


def reduce_by_model(logs, error_filter=None):
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
_lowerCamelCase = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_lowerCamelCase = get_job_links(args.workflow_run_id, token=args.token)
_lowerCamelCase = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_lowerCamelCase = k.find(' / ')
_lowerCamelCase = k[index + len(' / ') :]
_lowerCamelCase = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_lowerCamelCase = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_lowerCamelCase = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_lowerCamelCase = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_lowerCamelCase = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_lowerCamelCase = reduce_by_error(errors)
_lowerCamelCase = reduce_by_model(errors)
_lowerCamelCase = make_github_table(reduced_by_error)
_lowerCamelCase = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa) | 6 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
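# ---------------------------------------------------------------------------
# Illustrative usage (editor's addition). The checkpoint id is the standard
# LDM x4 upscaler on the Hub; the input path is a placeholder.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pipeline = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
    pipeline = pipeline.to("cuda")

    low_res = PIL.Image.open("low_res.png").convert("RGB").resize((128, 128))
    upscaled = pipeline(image=low_res, num_inference_steps=100, eta=1.0).images[0]
    upscaled.save("upscaled.png")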
| 81 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
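# Editor's worked example: a 10 mH inductor at 50 Hz gives
# X_L = 2*pi*f*L = 2 * pi * 50 * 0.010 = pi ≈ 3.1416 ohms.
if __name__ == "__main__":
    print(ind_reactance(inductance=10e-3, frequency=50, reactance=0))
    # {'reactance': 3.141592653589793}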
| 690 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ ={"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ =["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 690 | 1 |
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)


@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
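# ---------------------------------------------------------------------------
# Editor's sketch (not part of the test suite): the integration check above as
# a plain usage example; the checkpoint id comes from the test itself.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base")
    mpnet = MPNetModel.from_pretrained("microsoft/mpnet-base")
    with torch.no_grad():
        outputs = mpnet(**tokenizer("hello world", return_tensors="pt"))
    print(outputs.last_hidden_state.shape)  # e.g. torch.Size([1, 4, 768])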
| 615 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_altclip""": [
"""ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AltCLIPConfig""",
"""AltCLIPTextConfig""",
"""AltCLIPVisionConfig""",
],
"""processing_altclip""": ["""AltCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"""ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AltCLIPPreTrainedModel""",
"""AltCLIPModel""",
"""AltCLIPTextModel""",
"""AltCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 615 | 1 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class AcceleratedScheduler:
    """Wraps a learning-rate scheduler so it only advances when the wrapped
    optimizer(s) actually stepped, i.e. it is gradient-accumulation aware."""

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthrough the base scheduler API.
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
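# ---------------------------------------------------------------------------
# Editor's sketch (not in the original file): how this wrapper is obtained in
# practice. `accelerator.prepare` swaps a plain LR scheduler for an
# AcceleratedScheduler; the loop below is schematic.
#
#   from accelerate import Accelerator
#
#   accelerator = Accelerator(gradient_accumulation_steps=4)
#   model, optimizer, dataloader, scheduler = accelerator.prepare(
#       model, optimizer, dataloader, lr_scheduler
#   )
#   for batch in dataloader:
#       with accelerator.accumulate(model):
#           loss = model(**batch).loss
#           accelerator.backward(loss)
#           optimizer.step()
#           scheduler.step()   # silently skipped on accumulation-only steps
#           optimizer.zero_grad()
# ---------------------------------------------------------------------------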
| 262 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce the amount of console output from TF
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
print('''Torch version:''', None)
try:
import deepspeed
print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
print('''DeepSpeed version:''', None)
try:
import tensorflow as tf
print('''TensorFlow version:''', tf.__version__)
print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
print('''TensorFlow version:''', None)
| 262 | 1 |
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        vocab = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
).split(" " )
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
# check adding a single token
tokenizer.add_tokens("xxx" )
_lowerCAmelCase : List[str] = tokenizer("m xxx ɪ" , do_phonemize=__A ).input_ids
self.assertEqual(__A , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(["aaa", "bbb", "ccc"] )
_lowerCAmelCase : List[Any] = tokenizer("m aaa ɪ ccc" , do_phonemize=__A ).input_ids
self.assertEqual(__A , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
_lowerCAmelCase : Union[str, Any] = tokenizer("maɪ c" , do_phonemize=__A ).input_ids
self.assertEqual(__A , [3, 200] ) # mai should be <unk> (=3)
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : str = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
_lowerCAmelCase : Optional[Any] = '''Hello how are you'''
_lowerCAmelCase : Optional[Any] = tokenizer.phonemize(__A , phonemizer_lang="en-us" )
self.assertEqual(__A , "h ə l oʊ h aʊ ɑːɹ j uː" )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
_lowerCAmelCase : int = '''Hello how are you'''
_lowerCAmelCase : Union[str, Any] = tokenizer.phonemize(__A , phonemizer_lang="en-us" )
self.assertEqual(tokenizer(__A ).input_ids , tokenizer(__A , do_phonemize=__A ).input_ids )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
_lowerCAmelCase : Tuple = '''Hello how are you'''
_lowerCAmelCase : Any = tokenizer.phonemize(__A , phonemizer_lang="en-us" )
_lowerCAmelCase : Optional[int] = tokenizer.decode(tokenizer(__A ).input_ids )
self.assertEqual(__A , __A )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
_lowerCAmelCase : Dict = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
_lowerCAmelCase : Optional[int] = tokenizer.decode(sample_ids[0] )
_lowerCAmelCase : Optional[int] = tokenizer.batch_decode(__A )
self.assertEqual(__A , batch_tokens[0] )
self.assertEqual(__A , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
_lowerCAmelCase : Optional[int] = '''Hello how are you'''
_lowerCAmelCase : List[Any] = tokenizer.phonemize(__A , phonemizer_lang="en-us" )
self.assertEqual(__A , "h ə l oʊ | h aʊ | ɑːɹ | j uː |" )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
_lowerCAmelCase : Optional[int] = '''Hello how are you'''
_lowerCAmelCase : str = tokenizer.phonemize(__A , phonemizer_lang="en-us" )
self.assertEqual(tokenizer(__A ).input_ids , tokenizer(__A , do_phonemize=__A ).input_ids )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
# fmt: off
_lowerCAmelCase : Tuple = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
_lowerCAmelCase : str = tokenizer.decode(sample_ids[0] )
_lowerCAmelCase : str = tokenizer.batch_decode(__A )
self.assertEqual(__A , batch_tokens[0] )
self.assertEqual(__A , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
# decode with no word_del_token filter
_lowerCAmelCase : Optional[Any] = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=__A )
_lowerCAmelCase : Dict = tokenizer.batch_decode(__A , filter_word_delimiter_token=__A )
self.assertEqual(__A , batch_tokens[0] )
self.assertEqual(__A , ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"] )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : List[str] = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
_lowerCAmelCase : List[str] = '''Hello how are you'''
_lowerCAmelCase : str = tokenizer.phonemize(__A , phonemizer_lang="en-us" )
_lowerCAmelCase : Optional[Any] = tokenizer.decode(tokenizer(__A ).input_ids , filter_word_delimiter_token=__A )
self.assertEqual(__A , __A )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
_lowerCAmelCase : Tuple = '''Hello how are you'''
_lowerCAmelCase : Dict = tokenizer.phonemize(__A , phonemizer_lang="en-us" )
_lowerCAmelCase : Optional[Any] = tokenizer.decode(tokenizer(__A ).input_ids , filter_word_delimiter_token=__A )
self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |" )] ).strip() , __A )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Dict = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token=__A )
_lowerCAmelCase : int = '''Hello how are you'''
_lowerCAmelCase : Union[str, Any] = tokenizer(__A , phonemizer_lang="en-us" ).input_ids
_lowerCAmelCase : List[str] = tokenizer(__A , phonemizer_lang="fr-fr" ).input_ids
self.assertNotEqual(__A , __A )
_lowerCAmelCase : Tuple = tokenizer.decode(__A )
_lowerCAmelCase : Optional[int] = tokenizer.decode(__A )
self.assertEqual(__A , "h ə l oʊ h aʊ ɑːɹ j uː" )
self.assertEqual(__A , "ɛ l o h aʊ a ʁ j u" )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Any = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
_lowerCAmelCase : List[Any] = '''Hello how Are you'''
_lowerCAmelCase : int = '''hello how are you'''
_lowerCAmelCase : Union[str, Any] = tokenizer(__A ).input_ids
_lowerCAmelCase : Union[str, Any] = tokenizer(__A ).input_ids
self.assertEqual(__A , __A )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
tokenizer.add_tokens(["!", "?"] )
tokenizer.add_special_tokens({"cls_token": "$$$"} )
# fmt: off
_lowerCAmelCase : List[Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
_lowerCAmelCase : Tuple = tokenizer.batch_decode(__A )
self.assertEqual(__A , ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"] )
@staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : List[Any] = self.get_tokenizer(word_delimiter_token="|" )
tokenizer.add_tokens("|" )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
_lowerCAmelCase : Dict = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
_lowerCAmelCase : Any = tokenizer.decode(__A , output_char_offsets=__A , filter_word_delimiter_token=__A )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("text" in outputs )
self.assertTrue("char_offsets" in outputs )
self.assertTrue(isinstance(__A , __A ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"] , "char" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "char" ) , ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "start_offset" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "end_offset" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def SCREAMING_SNAKE_CASE__ ( self ):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))

            # transform list to ModelOutput
            outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]})

            self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])
# fmt: off
_lowerCAmelCase : Dict = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
_lowerCAmelCase : Optional[int] = tokenizer.batch_decode(__A , output_char_offsets=__A )
_lowerCAmelCase : Tuple = [tokenizer.decode(__A , output_char_offsets=__A ) for ids in sample_ids]
check_list_tuples_equal(__A , __A )
@unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes" )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
@unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes" )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
@unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency" )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
@unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing" )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Tuple = self.get_tokenizers(do_lower_case=__A )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCAmelCase : List[Any] = tokenizer.vocab_size
_lowerCAmelCase : List[str] = len(__A )
self.assertNotEqual(__A , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_lowerCAmelCase : List[str] = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
_lowerCAmelCase : Union[str, Any] = tokenizer.add_tokens(__A )
_lowerCAmelCase : List[Any] = tokenizer.vocab_size
_lowerCAmelCase : Dict = len(__A )
self.assertNotEqual(__A , 0 )
self.assertEqual(__A , __A )
self.assertEqual(__A , len(__A ) )
self.assertEqual(__A , all_size + len(__A ) )
_lowerCAmelCase : Optional[Any] = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=__A )
self.assertGreaterEqual(len(__A ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
_lowerCAmelCase : List[Any] = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
_lowerCAmelCase : str = tokenizer.add_special_tokens(__A )
_lowerCAmelCase : int = tokenizer.vocab_size
_lowerCAmelCase : Dict = len(__A )
self.assertNotEqual(__A , 0 )
self.assertEqual(__A , __A )
self.assertEqual(__A , len(__A ) )
self.assertEqual(__A , all_size_a + len(__A ) )
_lowerCAmelCase : List[Any] = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=__A )
self.assertGreaterEqual(len(__A ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode." )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
@unittest.skip("The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode." )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
_lowerCAmelCase : Any = self.get_tokenizers(fast=__A , do_lower_case=__A )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCAmelCase : Union[str, Any] = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
_lowerCAmelCase : int = tokenizer.convert_tokens_to_string(__A )
self.assertIsInstance(output["text"] , __A )
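# --- Added usage sketch (not from the original test file). The checkpoint is
# the one the tests above download; decoding the encoded ids round-trips to the
# phoneme string (requires the `phonemizer` backend to be installed).
tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
input_ids = tokenizer("Hello how are you", phonemizer_lang="en-us").input_ids
print(tokenizer.decode(input_ids))  # "h ə l oʊ h aʊ ɑːɹ j uː"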
| 424 |
def solution(power: int = 1000) -> int:
    """Return the sum of the digits of 2**power (Project Euler problem 16)."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
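# --- Added worked check (hand-verifiable, not from the original file):
# 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26, so:
assert solution(15) == 26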
| 443 | 0 |
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager to hide the terminal cursor."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
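# --- Added usage sketch (not from the original file). Assumption: run in a
# real terminal; the console calls have no visible effect when stdout is
# redirected.
import time

with hide():
    time.sleep(2)  # the cursor stays hidden here and is restored on exit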
| 716 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
class VideoMAEImageProcessor(BaseImageProcessor):
    r"""Video image processor: resize -> center-crop -> rescale -> normalize, per frame."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format)
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
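# --- Added usage sketch (not from the original file; the class name above is a
# restoration, and any 16-frame dummy video of HxWxC uint8 arrays works):
processor = VideoMAEImageProcessor()
video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(16)]
batch = processor(video, return_tensors="np")
print(batch["pixel_values"].shape)  # expected (1, 16, 3, 224, 224)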
| 14 | 0 |
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""

    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        '''simple docstring'''
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        '''simple docstring'''
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict | 545 |
'''simple docstring'''
from math import isqrt
def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
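# --- Added sanity check (hand-verifiable, not from the original file). The
# candidates below 100 are the consecutive-cube differences 7, 19, 37, 61, 91;
# all but 91 = 7 * 13 are prime, so:
assert solution(100) == 4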
| 507 | 0 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """simple docstring"""

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
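# --- Added usage example (values hand-checked, not from the original file):
# e.g. y[0] = 2*1 + 1*4 + 2*3 + (-1)*2 = 10 for the signals stored above.
convolver = CircularConvolution()
print(convolver.circular_convolution())  # [10, 10, 6, 14]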
| 394 |
'''simple docstring'''
from manim import *
class Stage1(Scene):
    """simple docstring"""

    def construct(self):
a__ = Rectangle(height=0.5 ,width=0.5 )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
a__ = [mem.copy() for i in range(6 )]
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*a__ ).arrange(a__ ,buff=0 )
a__ = VGroup(*a__ ).arrange(a__ ,buff=0 )
a__ = VGroup(a__ ,a__ ).arrange(a__ ,buff=0 )
a__ = Text("CPU" ,font_size=24 )
a__ = Group(a__ ,a__ ).arrange(a__ ,buff=0.5 ,aligned_edge=a__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(a__ )
a__ = [mem.copy() for i in range(1 )]
a__ = VGroup(*a__ ).arrange(a__ ,buff=0 )
a__ = Text("GPU" ,font_size=24 )
a__ = Group(a__ ,a__ ).arrange(a__ ,buff=0.5 ,aligned_edge=a__ )
gpu.align_to(a__ ,a__ )
gpu.set_x(gpu.get_x() - 1 )
self.add(a__ )
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*a__ ).arrange(a__ ,buff=0 )
a__ = Text("Model" ,font_size=24 )
a__ = Group(a__ ,a__ ).arrange(a__ ,buff=0.5 ,aligned_edge=a__ )
model.move_to([3, -1.0, 0] )
self.play(
Create(a__ ,run_time=1 ) ,Create(a__ ,run_time=1 ) ,Create(a__ ,run_time=1 ) ,)
a__ = MarkupText(
f'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' ,font_size=24 ,)
a__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a__ = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(a__ ,run_time=2.5 ) ,Write(a__ ) ,Write(a__ ) )
self.add(a__ )
a__ = []
a__ = []
a__ = []
for i, rect in enumerate(a__ ):
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(a__ ,opacity=0.7 )
cpu_target.move_to(a__ )
cpu_target.generate_target()
a__ = 0.46 / 4
a__ = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=a__ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target ,direction=a__ ,buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=a__ ,buff=0.0 )
cpu_targs.append(a__ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(a__ ) )
second_animations.append(MoveToTarget(a__ ,run_time=1.5 ) )
self.play(*a__ )
self.play(*a__ )
self.wait()
| 394 | 1 |
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
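
# --- Added usage sketch (coordinates illustrative: San Francisco -> Yosemite;
# not from the original file, and it needs the sibling haversine module):
SAN_FRANCISCO = (37.774856, -122.424227)
YOSEMITE = (37.864742, -119.537521)
print(lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *YOSEMITE))  # roughly 254 km, in meters
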
if __name__ == "__main__":
import doctest
doctest.testmod() | 287 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
) | 287 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
def lowerCamelCase(self ):
super().setUp()
# fmt: off
A_ : Dict = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
A_ : Tuple = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
A_ : Tuple = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
A_ : Dict = {"""unk_token""": """<unk>"""}
A_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCAmelCase_ ) )
def lowerCamelCase(self , **lowerCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowerCamelCase(self , **lowerCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowerCamelCase(self , lowerCAmelCase_ ):
A_ : Union[str, Any] = """lower newer"""
A_ : Tuple = """lower newer"""
return input_text, output_text
def lowerCamelCase(self ):
A_ : List[Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
A_ : int = """lower newer"""
A_ : List[str] = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
A_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
A_ : List[str] = tokens + [tokenizer.unk_token]
A_ : Optional[Any] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ )
@require_ftfy
def lowerCamelCase(self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
A_ : int = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
A_ : str = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
A_ : List[str] = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
A_ : Dict = tokenizer_s.tokenize(lowerCAmelCase_ )
A_ : Optional[Any] = tokenizer_r.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
A_ : Dict = """xa\u0303y""" + """ """ + """x\xe3y"""
A_ : Optional[Any] = tokenizer_s.tokenize(lowerCAmelCase_ )
A_ : Tuple = tokenizer_r.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Test that the tokenization is identical on unicode of space type
A_ : Optional[int] = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark):w
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
A_ : Any = tokenizer_s.tokenize(lowerCAmelCase_ )
A_ : str = tokenizer_r.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Test that the tokenization is identical on unicode of line break type
A_ : Union[str, Any] = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
A_ : int = tokenizer_s.tokenize(lowerCAmelCase_ )
A_ : Optional[Any] = tokenizer_r.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCamelCase(self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
A_ : Dict = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
A_ : Dict = f"""{text_of_1_token} {text_of_1_token}"""
A_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase_ , use_fast=lowerCAmelCase_ , )
A_ : Any = tokenizer_r(lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase_ ) + 1, len(lowerCAmelCase_ ) + 1 + len(lowerCAmelCase_ )) , )
A_ : Any = f""" {text}"""
A_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase_ , use_fast=lowerCAmelCase_ , )
A_ : str = tokenizer_r(lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase_ ) + 1, 1 + len(lowerCAmelCase_ ) + 1 + len(lowerCAmelCase_ )) , )
def lowerCamelCase(self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(lowerCAmelCase_ ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def lowerCamelCase(self ):
super().test_tokenization_python_rust_equals()
def lowerCamelCase(self ):
# CLIP always lower cases letters
pass
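# --- Added usage sketch (not from the original test file). The checkpoint
# "openai/clip-vit-base-patch32" hosts the real vocab/merges this tokenizer
# expects; tokens end in "</w>" because CLIP's BPE marks word boundaries.
tok = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
print(tok.tokenize("a photo of a cat"))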
| 717 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    """simple docstring"""

    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}])

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}])

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}])

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}])

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
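# --- Added usage sketch outside the test harness (not from the original file).
# The checkpoint is the one the slow test above exercises; the image path is
# the same COCO fixture the tests use.
vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
print(vqa(image="./tests/fixtures/tests_samples/COCO/000000039769.png", question="How many cats are there?", top_k=2))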
| 480 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(self, vocab_size=29056, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
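# --- Added usage sketch (not from the original file). The defaults above
# mirror the published Megatron-BERT-345M hyperparameters.
config = MegatronBertConfig()
print(config.hidden_size, config.num_attention_heads)  # 1024 16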
| 216 | import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    """simple docstring"""
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
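# --- Added example invocation (paths are illustrative placeholders, not from
# the original file):
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#     --pytorch_dump_folder_path ./transfo-xl-pt \
#     --tf_checkpoint_path ./tf_ckpt/model.ckpt-0 \
#     --transfo_xl_config_file ./tf_ckpt/config.json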
| 216 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 668 | """simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    '''simple docstring'''
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []
def solution() -> int:
    '''simple docstring'''
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F'{solution() = }')
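# --- Added worked check (known Project Euler 46 result, not from the original
# file): the smallest odd composite that is not a prime plus twice a square is 5777.
assert solution() == 5777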
| 668 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def _snake_case ( snake_case__ : list , snake_case__ : list ):
if len(snake_case__ ) != 2 or len(a[0] ) != 2 or len(snake_case__ ) != 2 or len(b[0] ) != 2:
raise Exception('Matrices are not 2x2' )
A = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def matrix_subtraction(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list):
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception('Odd matrices are not supported!')
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list):
    return len(matrix), len(matrix[0])
def print_matrix(matrix: list):
    print('\n'.join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list):
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrixa: list, matrixb: list):
    if matrix_dimensions(matrixa)[1] != matrix_dimensions(matrixb)[0]:
        msg = (
            'Unable to multiply these matrices, please check the dimensions.\n'
            F'Matrix A: {matrixa}\n'
            F'Matrix B: {matrixb}'
        )
        raise Exception(msg)
    dimensiona = matrix_dimensions(matrixa)
    dimensionb = matrix_dimensions(matrixb)
    if dimensiona[0] == dimensiona[1] and dimensionb[0] == dimensionb[1]:
        return [matrixa, matrixb]
    maxim = max(*dimensiona, *dimensionb)
    maxim = int(math.pow(2, math.ceil(math.log2(maxim))))
    new_matrixa = matrixa
    new_matrixb = matrixb
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimensiona[0]:
            for _ in range(dimensiona[1], maxim):
                new_matrixa[i].append(0)
        else:
            new_matrixa.append([0] * maxim)
        if i < dimensionb[0]:
            for _ in range(dimensionb[1], maxim):
                new_matrixb[i].append(0)
        else:
            new_matrixb.append([0] * maxim)
    final_matrix = actual_strassen(new_matrixa, new_matrixb)
    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimensiona[0]:
            for _ in range(dimensionb[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrixa = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrixb = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrixa, matrixb))
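    # Optional cross-check against a plain O(n^3) product (numpy is assumed to be
    # available; the inputs are deep-copied because strassen pads its argument lists
    # in place):
    import copy

    import numpy as np

    ma, mb = copy.deepcopy(matrixa), copy.deepcopy(matrixb)
    assert (np.array(strassen(ma, mb)) == np.array(matrixa) @ np.array(matrixb)).all()
 | 91 | """simple docstring"""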
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_A = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 159 | 0 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# Allow having multiple repository checkouts without needing to rerun
# 'pip install -e .[dev]' every time we switch between checkouts to run tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# Silence FutureWarning warnings in tests, since we often can't act on them until
# they become normal warnings; the tests still need to exercise the current functionality.
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    '''simple docstring'''
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    '''simple docstring'''
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("""--make-reports""")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
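# For context, a hedged sketch of what the shared helper presumably registers (the real
# pytest_addoption_shared lives in transformers.testing_utils; the option name below is
# inferred from the getoption("--make-reports") call above):
def _pytest_addoption_shared_sketch(parser):
    parser.addoption("--make-reports", action="store", default=None, help="generate report files")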
| 339 |
'''simple docstring'''
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    '''simple docstring'''
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = F'''Resistor at index {index} has a negative or zero value!'''
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum
def resistor_series(resistors: list[float]) -> float:
    '''simple docstring'''
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = F'''Resistor at index {index} has a negative value!'''
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
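# Worked example with values chosen for easy arithmetic:
# parallel: 1 / (1/2 + 1/4 + 1/4) = 1.0 ohm ; series: 2 + 4 + 4 = 10 ohm.
print(resistor_parallel([2, 4, 4]))  # 1.0
print(resistor_series([2, 4, 4]))    # 10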
| 339 | 1 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because it should only be run when releasing a minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
] )
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
if self.framework == "pytorch":
subprocess.run(
f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='''utf-8''' , check=__SCREAMING_SNAKE_CASE , )
assert hasattr(self , '''env''' )
def SCREAMING_SNAKE_CASE ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Dict:
a_ : List[Any] = f'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'
# distributed data settings
a_ : Optional[Any] = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__SCREAMING_SNAKE_CASE , instance_count=__SCREAMING_SNAKE_CASE , instance_type=self.instance_type , debugger_hook_config=__SCREAMING_SNAKE_CASE , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__SCREAMING_SNAKE_CASE , py_version='''py36''' , )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Any ) -> Any:
TrainingJobAnalytics(__SCREAMING_SNAKE_CASE ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )
@parameterized.expand([(2,)] )
def SCREAMING_SNAKE_CASE ( self : Tuple , __SCREAMING_SNAKE_CASE : str ) -> str:
# create estimator
a_ : Dict = self.create_estimator(__SCREAMING_SNAKE_CASE )
# run training
estimator.fit()
# result dataframe
a_ : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
a_ : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
a_ : Tuple = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
a_ : List[str] = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'{estimator.latest_training_job.name}.json' , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , __SCREAMING_SNAKE_CASE )
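# For reference, a minimal sketch of how parameterized_class injects each dictionary
# above as class attributes, generating one test class per configuration (illustrative
# only; it reuses the unittest and parameterized_class imports at the top of this file):
@parameterized_class([{"framework": "pytorch"}, {"framework": "tensorflow"}])
class ExampleParamTest(unittest.TestCase):
    def test_framework_attribute(self):
        self.assertIn(self.framework, ("pytorch", "tensorflow"))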
| 466 |
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError('''The parameter s type must be str.''')
    return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError('''The parameter s type must be str.''')
    if not s:
        raise ValueError('''The parameter s must not be empty.''')
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError('''The parameter bwt_string type must be str.''')
    if not bwt_string:
        raise ValueError('''The parameter bwt_string must not be empty.''')
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            '''The parameter idx_original_string type must be int or castable'''
            ''' to int.''')
    if idx_original_string < 0:
        raise ValueError('''The parameter idx_original_string must not be lower than 0.''')
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            '''The parameter idx_original_string must be lower than''' ''' len(bwt_string).''')
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    entry_msg = 'Provide a string that I will generate its BWT transform: '
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        F"""Burrows Wheeler transform for string '{s}' results """
        F"""in '{result['bwt_string']}'"""
    )
    original_string = reverse_bwt(result['bwt_string'], result['idx_original_string'])
    print(
        F"""Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' """
        F"""we get original string '{original_string}'"""
    )
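# Worked example, computed by hand from the definitions above: the rotations of "banana"
# sort to ['abanan', 'anaban', 'ananab', 'banana', 'nabana', 'nanaba']; their last
# characters spell the transform, and "banana" sits at index 3 of the sorted list.
assert bwt_transform("banana") == {"bwt_string": "nnbaaa", "idx_original_string": 3}
assert reverse_bwt("nnbaaa", 3) == "banana"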
| 466 | 1 |
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("""Number should not be negative.""")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
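# Usage note: functools.lru_cache memoizes every computed value, so later calls reuse
# earlier results instead of recursing from scratch.
assert factorial(5) == 120
assert factorial(10) == 3_628_800  # builds on the cached factorial(5)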
| 189 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a ( _UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
UpperCAmelCase__ : Tuple = CycleDiffusionPipeline
UpperCAmelCase__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
UpperCAmelCase__ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"latents"}
UpperCAmelCase__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
UpperCAmelCase__ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase__ : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def SCREAMING_SNAKE_CASE__ ( self : str ):
torch.manual_seed(0 )
__lowerCamelCase: Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
__lowerCamelCase: Dict = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , )
torch.manual_seed(0 )
__lowerCamelCase: Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__lowerCamelCase: str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__lowerCamelCase: int = CLIPTextModel(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCamelCase: Optional[int] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str=0 ):
__lowerCamelCase: Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: List[str] = image / 2 + 0.5
if str(SCREAMING_SNAKE_CASE_ ).startswith("""mps""" ):
__lowerCamelCase: int = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
__lowerCamelCase: Tuple = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Any = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self : Any ):
__lowerCamelCase: Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase: Union[str, Any] = self.get_dummy_components()
__lowerCamelCase: Union[str, Any] = CycleDiffusionPipeline(**SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: List[Any] = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: List[Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Dict = pipe(**SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Union[str, Any] = output.images
__lowerCamelCase: int = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__lowerCamelCase: Dict = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
__lowerCamelCase: Optional[int] = self.get_dummy_components()
for name, module in components.items():
if hasattr(SCREAMING_SNAKE_CASE_ , """half""" ):
__lowerCamelCase: Tuple = module.half()
__lowerCamelCase: Union[str, Any] = CycleDiffusionPipeline(**SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Optional[Any] = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: str = pipe(**SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: int = output.images
__lowerCamelCase: Optional[int] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__lowerCamelCase: List[str] = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def SCREAMING_SNAKE_CASE__ ( self : int ):
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
return super().test_inference_batch_single_identical()
@skip_mps
def SCREAMING_SNAKE_CASE__ ( self : str ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
return super().test_save_load_optional_components()
@skip_mps
def SCREAMING_SNAKE_CASE__ ( self : int ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
__lowerCamelCase: Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
__lowerCamelCase: Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
__lowerCamelCase: str = init_image.resize((512, 512) )
__lowerCamelCase: Dict = """CompVis/stable-diffusion-v1-4"""
__lowerCamelCase: Optional[int] = DDIMScheduler.from_pretrained(SCREAMING_SNAKE_CASE_ , subfolder="""scheduler""" )
__lowerCamelCase: List[Any] = CycleDiffusionPipeline.from_pretrained(
SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , torch_dtype=torch.floataa , revision="""fp16""" )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe.enable_attention_slicing()
__lowerCamelCase: List[Any] = """A black colored car"""
__lowerCamelCase: List[Any] = """A blue colored car"""
__lowerCamelCase: List[Any] = torch.manual_seed(0 )
__lowerCamelCase: Any = pipe(
prompt=SCREAMING_SNAKE_CASE_ , source_prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=SCREAMING_SNAKE_CASE_ , output_type="""np""" , )
__lowerCamelCase: Dict = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
__lowerCamelCase: Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
__lowerCamelCase: Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
__lowerCamelCase: List[str] = init_image.resize((512, 512) )
__lowerCamelCase: List[Any] = """CompVis/stable-diffusion-v1-4"""
__lowerCamelCase: Optional[Any] = DDIMScheduler.from_pretrained(SCREAMING_SNAKE_CASE_ , subfolder="""scheduler""" )
__lowerCamelCase: Dict = CycleDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe.enable_attention_slicing()
__lowerCamelCase: Optional[int] = """A black colored car"""
__lowerCamelCase: int = """A blue colored car"""
__lowerCamelCase: str = torch.manual_seed(0 )
__lowerCamelCase: Optional[Any] = pipe(
prompt=SCREAMING_SNAKE_CASE_ , source_prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=SCREAMING_SNAKE_CASE_ , output_type="""np""" , )
__lowerCamelCase: Any = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 189 | 1 |
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class __lowercase ( __lowerCamelCase , __lowerCamelCase ):
snake_case_ = """pixel_values"""
snake_case_ = False
snake_case_ = TimmBackboneConfig
def __init__( self : Union[str, Any] ,A : Optional[int] ,**A : Union[str, Any] ):
'''simple docstring'''
requires_backends(self ,"""timm""" )
super().__init__(A )
UpperCAmelCase__ : List[Any] = config
if config.backbone is None:
raise ValueError("""backbone is not set in the config. Please set it to a timm model name.""" )
if config.backbone not in timm.list_models():
raise ValueError(f"backbone {config.backbone} is not supported by timm." )
if hasattr(A ,"""out_features""" ) and config.out_features is not None:
raise ValueError("""out_features is not supported by TimmBackbone. Please use out_indices instead.""" )
UpperCAmelCase__ : List[Any] = getattr(A ,"""use_pretrained_backbone""" ,A )
if pretrained is None:
raise ValueError("""use_pretrained_backbone is not set in the config. Please set it to True or False.""" )
# We just take the final layer by default. This matches the default for the transformers models.
UpperCAmelCase__ : List[str] = config.out_indices if getattr(A ,"""out_indices""" ,A ) is not None else (-1,)
UpperCAmelCase__ : Dict = timm.create_model(
config.backbone ,pretrained=A ,features_only=config.features_only ,in_chans=config.num_channels ,out_indices=A ,**A ,)
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
UpperCAmelCase__ : Union[str, Any] = self._backbone.return_layers
UpperCAmelCase__ : int = {layer["""module"""]: str(A ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(A )
@classmethod
def __lowercase ( cls : int ,A : Optional[Any] ,*A : int ,**A : Optional[Any] ):
'''simple docstring'''
requires_backends(cls ,["""vision""", """timm"""] )
from ...models.timm_backbone import TimmBackboneConfig
UpperCAmelCase__ : str = kwargs.pop("""config""" ,TimmBackboneConfig() )
UpperCAmelCase__ : Optional[Any] = kwargs.pop("""use_timm_backbone""" ,A )
if not use_timm:
raise ValueError("""use_timm_backbone must be True for timm backbones""" )
UpperCAmelCase__ : Union[str, Any] = kwargs.pop("""num_channels""" ,config.num_channels )
UpperCAmelCase__ : Tuple = kwargs.pop("""features_only""" ,config.features_only )
UpperCAmelCase__ : List[str] = kwargs.pop("""use_pretrained_backbone""" ,config.use_pretrained_backbone )
UpperCAmelCase__ : Union[str, Any] = kwargs.pop("""out_indices""" ,config.out_indices )
UpperCAmelCase__ : Any = TimmBackboneConfig(
backbone=A ,num_channels=A ,features_only=A ,use_pretrained_backbone=A ,out_indices=A ,)
return super()._from_config(A ,**A )
def __lowercase ( self : Optional[int] ,A : Optional[Any] ):
'''simple docstring'''
pass
def __lowercase ( self : Optional[Any] ,A : Optional[Any] ,A : Optional[Any]=None ,A : Any=None ,A : int=None ,**A : int ):
'''simple docstring'''
UpperCAmelCase__ : str = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase__ : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase__ : Tuple = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("""Cannot output attentions for timm backbones at the moment""" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
UpperCAmelCase__ : Tuple = self._all_layers
UpperCAmelCase__ : Tuple = self._backbone(A ,**A )
UpperCAmelCase__ : Tuple = self._return_layers
UpperCAmelCase__ : Union[str, Any] = tuple(hidden_states[i] for i in self.out_indices )
else:
UpperCAmelCase__ : List[str] = self._backbone(A ,**A )
UpperCAmelCase__ : Any = None
UpperCAmelCase__ : List[Any] = tuple(A )
UpperCAmelCase__ : Dict = tuple(A ) if hidden_states is not None else None
if not return_dict:
UpperCAmelCase__ : str = (feature_maps,)
if output_hidden_states:
UpperCAmelCase__ : Optional[Any] = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=A ,hidden_states=A ,attentions=A )
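# Hedged usage sketch (the timm model name and tensor shape are illustrative
# assumptions; requires the timm package to be installed):
#
#     import torch
#     from transformers import TimmBackbone, TimmBackboneConfig
#
#     config = TimmBackboneConfig(
#         backbone="resnet18", use_pretrained_backbone=False, out_indices=(1, 2, 3, 4)
#     )
#     backbone = TimmBackbone(config)
#     pixel_values = torch.randn(1, 3, 224, 224)  # (batch, channels, height, width)
#     feature_maps = backbone(pixel_values).feature_maps  # one tensor per requested stage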
| 65 | import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__a : Tuple = logging.get_logger(__name__)
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
__lowercase = SwinConfig.from_pretrained(
'''microsoft/swin-tiny-patch4-window7-224''' , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
__lowercase = MaskFormerConfig(backbone_config=lowercase )
__lowercase = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
__lowercase = 847
__lowercase = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
__lowercase = 150
__lowercase = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
__lowercase = 171
__lowercase = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
__lowercase = 133
__lowercase = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
__lowercase = 19
__lowercase = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
__lowercase = 65
__lowercase = '''mapillary-vistas-id2label.json'''
__lowercase = json.load(open(hf_hub_download(lowercase , lowercase , repo_type='''dataset''' ) , '''r''' ) )
__lowercase = {int(lowercase ): v for k, v in idalabel.items()}
return config
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
__lowercase = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.layers.{i}.downsample.reduction.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.layers.{i}.downsample.norm.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.layers.{i}.downsample.norm.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F"sem_seg_head.adapter_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight") )
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight") )
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias") )
# cross-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias") )
# MLP 1
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", F"model.transformer_module.decoder.layers.{idx}.fc1.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", F"model.transformer_module.decoder.layers.{idx}.fc1.bias") )
# MLP 2
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", F"model.transformer_module.decoder.layers.{idx}.fc2.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", F"model.transformer_module.decoder.layers.{idx}.fc2.bias") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias") )
# layernorm 3 (final layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.weight", F"mask_embedder.{i}.0.weight") )
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.bias", F"mask_embedder.{i}.0.bias") )
# fmt: on
return rename_keys
def UpperCAmelCase ( lowercase , lowercase , lowercase ):
"""simple docstring"""
__lowercase = dct.pop(lowercase )
__lowercase = val
def UpperCAmelCase ( lowercase , lowercase ):
"""simple docstring"""
__lowercase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__lowercase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.weight" )
__lowercase = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[:dim, :]
__lowercase = in_proj_bias[: dim]
__lowercase = in_proj_weight[
dim : dim * 2, :
]
__lowercase = in_proj_bias[
dim : dim * 2
]
__lowercase = in_proj_weight[
-dim :, :
]
__lowercase = in_proj_bias[-dim :]
# fmt: on
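# To make the fused-qkv slicing above concrete, a small self-contained sketch (assumed
# shape: the fused projection stacks query, key and value along dim 0, i.e. (3*dim, dim)):
#
#     import torch
#     dim = 4
#     in_proj_weight = torch.randn(3 * dim, dim)
#     q_w = in_proj_weight[:dim, :]           # query rows
#     k_w = in_proj_weight[dim : 2 * dim, :]  # key rows
#     v_w = in_proj_weight[-dim:, :]          # value rows
#     assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)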
def UpperCAmelCase ( lowercase , lowercase ):
"""simple docstring"""
__lowercase = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight" )
__lowercase = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[: hidden_size, :]
__lowercase = in_proj_bias[:config.hidden_size]
__lowercase = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowercase = in_proj_bias[hidden_size : hidden_size * 2]
__lowercase = in_proj_weight[-hidden_size :, :]
__lowercase = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight" )
__lowercase = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[: hidden_size, :]
__lowercase = in_proj_bias[:config.hidden_size]
__lowercase = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowercase = in_proj_bias[hidden_size : hidden_size * 2]
__lowercase = in_proj_weight[-hidden_size :, :]
__lowercase = in_proj_bias[-hidden_size :]
# fmt: on
def UpperCAmelCase ( ):
"""simple docstring"""
__lowercase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__lowercase = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( lowercase , lowercase , lowercase , lowercase = False ):
"""simple docstring"""
__lowercase = get_maskformer_config(lowercase )
# load original state_dict
with open(lowercase , '''rb''' ) as f:
__lowercase = pickle.load(lowercase )
__lowercase = data['''model''']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
__lowercase = create_rename_keys(lowercase )
for src, dest in rename_keys:
rename_key(lowercase , lowercase , lowercase )
read_in_swin_q_k_v(lowercase , config.backbone_config )
read_in_decoder_q_k_v(lowercase , lowercase )
# update to torch tensors
for key, value in state_dict.items():
__lowercase = torch.from_numpy(lowercase )
# load 🤗 model
__lowercase = MaskFormerForInstanceSegmentation(lowercase )
model.eval()
for name, param in model.named_parameters():
print(lowercase , param.shape )
__lowercase , __lowercase = model.load_state_dict(lowercase , strict=lowercase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(lowercase ) == 0, F"Unexpected keys: {unexpected_keys}"
# verify results
__lowercase = prepare_img()
if "vistas" in model_name:
__lowercase = 65
elif "cityscapes" in model_name:
__lowercase = 65535
else:
__lowercase = 255
__lowercase = True if '''ade''' in model_name else False
__lowercase = MaskFormerImageProcessor(ignore_index=lowercase , reduce_labels=lowercase )
__lowercase = image_processor(lowercase , return_tensors='''pt''' )
__lowercase = model(**lowercase )
print('''Logits:''' , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
__lowercase = torch.tensor(
[[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowercase , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"Saving model and image processor to {pytorch_dump_folder_path}" )
Path(lowercase ).mkdir(exist_ok=lowercase )
model.save_pretrained(lowercase )
image_processor.save_pretrained(lowercase )
if push_to_hub:
print('''Pushing model and image processor to the hub...''' )
model.push_to_hub(F"nielsr/{model_name}" )
image_processor.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
__a : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help="""Name of the MaskFormer model you'd like to convert""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__a : str = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 534 | 0 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
__snake_case = "CompVis/stable-diffusion-v1-1"
__snake_case = "CompVis/stable-diffusion-v1-2"
__snake_case = "CompVis/stable-diffusion-v1-3"
__snake_case = "CompVis/stable-diffusion-v1-4"
class UpperCAmelCase ( __snake_case ):
def __init__( self : List[str] , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , __magic_name__ : bool = True , ):
"""simple docstring"""
super().__init__()
UpperCamelCase = StableDiffusionPipeline.from_pretrained(__magic_name__ )
UpperCamelCase = StableDiffusionPipeline.from_pretrained(__magic_name__ )
UpperCamelCase = StableDiffusionPipeline.from_pretrained(__magic_name__ )
UpperCamelCase = StableDiffusionPipeline(
vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , requires_safety_checker=__magic_name__ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
return {k: getattr(self , __magic_name__ ) for k in self.config.keys() if not k.startswith("""_""" )}
def lowerCamelCase_ ( self : List[str] , __magic_name__ : Optional[Union[str, int]] = "auto" ):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
self.enable_attention_slicing(__magic_name__ )
@torch.no_grad()
def lowerCamelCase_ ( self : Dict , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_1_2 , __magic_name__ : int = 5_1_2 , __magic_name__ : int = 5_0 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Tuple , ):
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def lowerCamelCase_ ( self : Optional[int] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_1_2 , __magic_name__ : int = 5_1_2 , __magic_name__ : int = 5_0 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ):
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def lowerCamelCase_ ( self : Optional[int] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_1_2 , __magic_name__ : int = 5_1_2 , __magic_name__ : int = 5_0 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Dict , ):
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def lowerCamelCase_ ( self : int , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_1_2 , __magic_name__ : int = 5_1_2 , __magic_name__ : int = 5_0 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : List[str] , ):
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def lowerCamelCase_ ( self : Optional[int] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_1_2 , __magic_name__ : int = 5_1_2 , __magic_name__ : int = 5_0 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Dict , ):
"""simple docstring"""
UpperCamelCase = """cuda""" if torch.cuda.is_available() else """cpu"""
self.to(__magic_name__ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` must be divisible by 8 but are {height} and {width}.' )
# Get first result from Stable Diffusion Checkpoint v1.1
UpperCamelCase = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.2
UpperCamelCase = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.3
UpperCamelCase = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.4
UpperCamelCase = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
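# Hedged loading sketch; diffusers resolves community pipelines by name through the
# custom_pipeline argument (the pipeline name below is an assumption about how this
# file is published on the Hub):
#
#     from diffusers import DiffusionPipeline
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4",
#         custom_pipeline="stable_diffusion_comparison",
#     )
#     output = pipe(prompt="an astronaut riding a horse", num_inference_steps=25)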
| 181 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase ( __snake_case , unittest.TestCase ):
lowercase = XLMTokenizer
lowercase = False
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
UpperCamelCase = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
UpperCamelCase = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(__magic_name__ ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(__magic_name__ ) )
def lowerCamelCase_ ( self : Any , __magic_name__ : List[str] ):
"""simple docstring"""
UpperCamelCase = """lower newer"""
UpperCamelCase = """lower newer"""
return input_text, output_text
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
tokenizer = XLMTokenizer(self.vocab_file , self.merges_file )
text = """lower"""
bpe_tokens = ["""low""", """er</w>"""]
tokens = tokenizer.tokenize(text )
self.assertListEqual(tokens , bpe_tokens )
input_tokens = tokens + ["""<unk>"""]
input_bpe_tokens = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@slow
def test_sequence_builders( self ):
"""simple docstring"""
tokenizer = XLMTokenizer.from_pretrained("""xlm-mlm-en-2048""" )
text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
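# How the toy vocab/merges above play out (illustrative walk-through): "lower" starts
# as the symbols l o w e r</w>; the merge ranks "l o", "lo w" and "e r</w>" then fold
# it into ["low", "er</w>"], whose vocab ids are 14 and 15 -- exactly what
# test_full_tokenizer asserts, with "<unk>" mapping to id 20.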
| 181 | 1 |
from sklearn.metrics import recall_score
import datasets
lowerCAmelCase_ = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
lowerCAmelCase_ = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
lowerCAmelCase_ = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class A (datasets.Metric ):
def _info( self ) -> Tuple:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None , zero_division="warn" , ):
    '''simple docstring'''
    score = recall_score(
        references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
    return {"recall": float(score ) if score.size == 1 else score}
| 326 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    """The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , ROBERTA_START_DOCSTRING , )
class DeeRobertaModel(DeeBertModel ):
    config_class = RobertaConfig
    base_model_prefix = """roberta"""
    def __init__( self , config ):
        '''simple docstring'''
        super().__init__(config )
        self.embeddings = RobertaEmbeddings(config )
        self.init_weights()
@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """ , ROBERTA_START_DOCSTRING , )
class DeeRobertaForSequenceClassification(BertPreTrainedModel ):
    config_class = RobertaConfig
    base_model_prefix = """roberta"""
    def __init__( self , config ):
        '''simple docstring'''
        super().__init__(config )
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.roberta = DeeRobertaModel(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifier = nn.Linear(config.hidden_size , self.config.num_labels )
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING )
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , labels=None , output_layer=-1 , train_highway=False , ):
        '''simple docstring'''
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , )
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output )
            logits = self.classifier(pooled_output )
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
        logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits )
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(highway_loss )
            if train_highway:
                outputs = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
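# A small illustrative helper (not part of the original file) showing how the
# (highway_logits_all, highway_entropy) outputs above can drive inference-time
# early exit: take the first highway whose prediction entropy is below a threshold.
def pick_early_exit(highway_logits_all, highway_entropy, threshold=0.3):
    # scan exits from shallowest to deepest; stop at the first confident one
    for layer, (logits, ent) in enumerate(zip(highway_logits_all, highway_entropy)):
        if ent < threshold:
            return layer, logits.argmax(dim=-1)
    # fall back to the deepest (final) classifier
    return len(highway_logits_all) - 1, highway_logits_all[-1].argmax(dim=-1)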
| 326 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ="""▁"""
__SCREAMING_SNAKE_CASE ={"""vocab_file""": """sentencepiece.bpe.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/mbart-large-en-ro""": 1_024,
"""facebook/mbart-large-cc25""": 1_024,
}
# fmt: off
__SCREAMING_SNAKE_CASE =["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class MBartTokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file,
            src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    @property
    def vocab_size( self ) -> int:
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token
    @property
    def src_lang( self ) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang: str ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
def _A ( self: int , _lowerCamelCase: List[int] , _lowerCamelCase: Optional[List[int]] = None , _lowerCamelCase: bool = False ) -> Optional[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = [1] * len(self.prefix_tokens )
SCREAMING_SNAKE_CASE_ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones
def _A ( self: List[Any] , _lowerCamelCase: List[int] , _lowerCamelCase: Optional[List[int]] = None ) -> Dict:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _A ( self: List[str] , _lowerCamelCase: List[int] , _lowerCamelCase: Optional[List[int]] = None ) -> Tuple:
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _A ( self: List[Any] , _lowerCamelCase: Optional[Any] , _lowerCamelCase: str , _lowerCamelCase: Optional[str] , _lowerCamelCase: Optional[str] , **_lowerCamelCase: Union[str, Any] ) -> Union[str, Any]:
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
SCREAMING_SNAKE_CASE_ = src_lang
SCREAMING_SNAKE_CASE_ = self(_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = self.convert_tokens_to_ids(_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = tgt_lang_id
return inputs
def _A ( self: Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _A ( self: Union[str, Any] , _lowerCamelCase: str ) -> Union[str, Any]:
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def _A ( self: str , _lowerCamelCase: Union[str, Any] ) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE_ = self.sp_model.PieceToId(_lowerCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _A ( self: List[str] , _lowerCamelCase: Any ) -> Dict:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _A ( self: List[Any] , _lowerCamelCase: Union[str, Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = ''''''.join(_lowerCamelCase ).replace(_lowerCamelCase , ''' ''' ).strip()
return out_string
def _A ( self: int , _lowerCamelCase: str , _lowerCamelCase: Optional[str] = None ) -> Any:
if not os.path.isdir(_lowerCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE_ = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , '''wb''' ) as fi:
SCREAMING_SNAKE_CASE_ = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
def _A ( self: List[Any] , _lowerCamelCase: List[str] , _lowerCamelCase: str = "en_XX" , _lowerCamelCase: Optional[List[str]] = None , _lowerCamelCase: str = "ro_RO" , **_lowerCamelCase: str , ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = src_lang
SCREAMING_SNAKE_CASE_ = tgt_lang
return super().prepare_seqaseq_batch(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
    def _switch_to_input_mode( self ):
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ) -> None:
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
    def set_tgt_lang_special_tokens( self , lang ) -> None:
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
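# A tiny sketch (illustrative, not part of the original file) of the fairseq/spm
# offset arithmetic documented in __init__ above: spm ids shift up by fairseq_offset,
# and spm id 0 (SentencePiece's own <unk>) must map to the fairseq <unk> id instead.
def spm_id_to_fairseq_id(spm_id: int, fairseq_offset: int = 1, unk_token_id: int = 3) -> int:
    # SentencePiece returns 0 for unknown pieces; everything else is shifted by the offset
    return spm_id + fairseq_offset if spm_id else unk_token_id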
| 711 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "realm"
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , retriever_proj_size=1_28 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=30_72 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , span_hidden_size=2_56 , max_span_width=10 , reader_layer_norm_eps=1E-3 , reader_beam_size=5 , reader_seq_len=3_20 , num_block_records=13_35_37_18 , searcher_beam_size=50_00 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 89 | 0 |
'''simple docstring'''
def get_set_bits_count( number : int ):
    '''simple docstring'''
    if not isinstance(number , int ) or number < 0:
        raise ValueError("""Input must be a non-negative integer""" )
    count = 0
    while number:
        # This way we arrive at the next set bit (the next 1) instead of looping
        # through each bit and checking for 1s; hence the loop won't run 32 times,
        # it only runs once per `1` bit
        number &= number - 1
        count += 1
    return count
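# Worked example of the `number &= number - 1` trick: clearing the lowest set bit on
# each pass means the loop runs once per 1-bit. For 0b10110 (22):
# 22 & 21 = 0b10100, 20 & 19 = 0b10000, 16 & 15 = 0 -> 3 iterations, 3 set bits.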
if __name__ == "__main__":
import doctest
doctest.testmod()
| 135 |
'''simple docstring'''
from __future__ import annotations
def depth_first_search( possible_board: list[int] , diagonal_right_collisions: list[int] , diagonal_left_collisions: list[int] , boards: list[list[str]] , n: int , ) -> None:
    '''simple docstring'''
    row = len(possible_board )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] )
return
    # We iterate over each column in the row to find all possible results in each row
    for col in range(n ):
        # We apply what we learned previously. First we check that the current board
        # (possible_board) does not already contain the same column value, because if
        # it does there is a vertical collision. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45º: row - col = b
        # 135º: y + x = b or 135º: row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist in
        # their respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these are True it means there is a collision, so we continue to the
        # next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
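# Worked example of the two diagonal formulas used above: queens at (row=0, col=1)
# and (row=1, col=2) collide on a 45º diagonal because row - col == -1 for both,
# while queens at (row=0, col=2) and (row=1, col=1) collide on a 135º diagonal
# because row + col == 2 for both.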
def n_queens_solution( n: int ) -> None:
    '''simple docstring'''
    boards: list[list[str]] = []
    depth_first_search([] , [] , [] , boards , n )
# Print all the boards
for board in boards:
for column in board:
print(column )
print("""""" )
print(len(lowerCamelCase__ ) , """solutions were found.""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 135 | 1 |
from __future__ import annotations
from math import ceil, floor, sqrt
def solution( target: int = 2000000 ) -> int:
    '''simple docstring'''
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )
    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int
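    # Why the estimate works: T(a) * T(b) counts the rectangles in an a x b grid, so we
    # need T(b) ~= target / T(a), i.e. b * (b + 1) / 2 = target / T(a). Solving
    # b**2 + b - 2 * target / T(a) = 0 with the quadratic formula gives
    # b = (-1 + sqrt(1 + 8 * target / T(a))) / 2, which is exactly b_estimate below.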
    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
if __name__ == "__main__":
print(f"""{solution() = }""")
| 80 |
from __future__ import annotations
def lowerCamelCase__ ( nums : list ):
    '''simple docstring'''
    if not nums:
        raise ValueError("""List is empty""" )
    return sum(nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch( gpt2_checkpoint_path , gpt2_config_file , pytorch_dump_folder_path ):
    # Construct model
    if gpt2_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpt2_config_file )
    model = GPTaModel(config )
    # Load weights from numpy
    load_tf_weights_in_gpta(model , config , gpt2_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + """/""" + CONFIG_NAME
    print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F'Save configuration file to {pytorch_config_dump_path}' )
    with open(pytorch_config_dump_path , """w""" , encoding="""utf-8""" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
args = parser.parse_args()
convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
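# Example invocation (paths are placeholders; the script name is whatever this file
# is saved as):
# python convert_gpt2_checkpoint_to_pytorch.py \
#     --gpt2_checkpoint_path /path/to/tf_checkpoint \
#     --pytorch_dump_folder_path /path/to/output_dir \
#     --gpt2_config_file /path/to/config.json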
| 665 |
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator: Accelerator , batch_size: int = 16 ):
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = load_dataset("""glue""" , """mrpc""" )
    def tokenize_function( examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn( examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size , drop_last=True )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE , drop_last=(accelerator.mixed_precision == """fp8""") , )
    return train_dataloader, eval_dataloader
return train_dataloader, eval_dataloader
def training_function( config , args ):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    metric = evaluate.load("""glue""" , """mrpc""" )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
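    # e.g. a requested batch_size of 64 with MAX_GPU_BATCH_SIZE = 16 yields
    # gradient_accumulation_steps = 4 and a per-step batch of 16, so the effective
    # batch size (16 * 4 = 64) is unchanged.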
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'epoch {epoch}:' , eval_metric )
def main():
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    args = parser.parse_args()
    config = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
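# Example launches (the script filename is a placeholder):
# python nlp_example.py                                      # single CPU or GPU
# accelerate launch nlp_example.py --mixed_precision fp16    # via the accelerate CLI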
| 665 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class lowerCamelCase__( unittest.TestCase ):
    def setUp( self ):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
        processor = BlipaProcessor(image_processor , tokenizer )
        processor.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer
    def get_image_processor( self , **kwargs ):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def tearDown( self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        """simple docstring"""
        image_inputs = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowercase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__lowercase = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
__lowercase = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(__UpperCAmelCase , return_tensors="""np""" )
__lowercase = processor(images=__UpperCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
__lowercase = """lower newer"""
__lowercase = processor(text=__UpperCAmelCase )
__lowercase = tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
__lowercase = """lower newer"""
__lowercase = self.prepare_image_inputs()
__lowercase = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
__lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowercase = processor.batch_decode(__UpperCAmelCase )
__lowercase = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
__lowercase = """lower newer"""
__lowercase = self.prepare_image_inputs()
__lowercase = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 713 |
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
headers = {'UserAgent': UserAgent().random}
def extract_user_profile( script ) -> dict:
    '''simple docstring'''
    data = script.contents[0]
    info = json.loads(data[data.find("""{\"config\"""" ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__( self , username ):
        """simple docstring"""
        self.url = F'''https://www.instagram.com/{username}/'''
        self.user_data = self.get_json()
    def get_json( self ):
        """simple docstring"""
        html = requests.get(self.url , headers=headers ).text
        scripts = BeautifulSoup(html , """html.parser""" ).find_all("""script""" )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )
def __repr__( self ):
"""simple docstring"""
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self ):
"""simple docstring"""
return F'''{self.fullname} ({self.username}) is {self.biography}'''
    @property
    def username( self ):
        """simple docstring"""
        return self.user_data["username"]
    @property
    def fullname( self ):
        """simple docstring"""
        return self.user_data["full_name"]
    @property
    def biography( self ):
        """simple docstring"""
        return self.user_data["biography"]
    @property
    def email( self ):
        """simple docstring"""
        return self.user_data["business_email"]
    @property
    def website( self ):
        """simple docstring"""
        return self.user_data["external_url"]
    @property
    def number_of_followers( self ):
        """simple docstring"""
        return self.user_data["edge_followed_by"]["count"]
    @property
    def number_of_followings( self ):
        """simple docstring"""
        return self.user_data["edge_follow"]["count"]
    @property
    def number_of_posts( self ):
        """simple docstring"""
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def profile_picture_url( self ):
        """simple docstring"""
        return self.user_data["profile_pic_url_hd"]
    @property
    def is_verified( self ):
        """simple docstring"""
        return self.user_data["is_verified"]
    @property
    def is_private( self ):
        """simple docstring"""
        return self.user_data["is_private"]
def lowercase__ ( __UpperCamelCase : str = "github" ):
'''simple docstring'''
import os
if os.environ.get("""CI""" ):
return # test failing on GitHub Actions
instagram_user = InstagramUser(username )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , __UpperCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("""https://instagram.""" )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
instagram_user = InstagramUser('github')
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""")
| 339 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance( inductance , frequency , reactance ) -> dict[str, float]:
"""simple docstring"""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if inductance < 0:
raise ValueError('Inductance cannot be negative' )
if frequency < 0:
raise ValueError('Frequency cannot be negative' )
if reactance < 0:
raise ValueError('Inductive reactance cannot be negative' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('Exactly one argument must be 0' )
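# Worked example of X_L = 2 * pi * f * L solved by the branches above: a 10 mH
# inductor at 50 Hz gives 2 * pi * 50 * 0.01 ~= 3.1416 ohms, so
# ind_reactance(10e-3, 50, 0) returns {"reactance": 3.141592653589793}, and passing
# that reactance back with inductance=0 recovers the 10 mH.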
if __name__ == "__main__":
import doctest
doctest.testmod()
| 690 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def __snake_case ( UpperCamelCase__ = "laptop" ) -> DataFrame:
"""simple docstring"""
A = f'https://www.amazon.in/laptop/s?k={product}'
A = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36',
'Accept-Language': 'en-US, en;q=0.5',
}
A = BeautifulSoup(requests.get(UpperCamelCase__ , headers=UpperCamelCase__ ).text )
# Initialize a Pandas dataframe with the column titles
A = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
        try:
            product_title = item.h2.text
            product_link = 'https://www.amazon.in/' + item.h2.a['href']
            product_price = item.find('span' , attrs={'class': 'a-offscreen'} ).text
            try:
                product_rating = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
            except AttributeError:
                product_rating = 'Not available'
            try:
                product_mrp = (
                    '₹'
                    + item.find(
                        'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
                )
            except AttributeError:
                product_mrp = ''
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip('₹' ).replace(',' , '' ) )
                            - float(product_price.strip('₹' ).replace(',' , '' ) )
                        )
                        / float(product_mrp.strip('₹' ).replace(',' , '' ) )
                    )
                    * 100 )
            except ValueError:
                discount = float('nan' )
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index )] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        data_frame.loc[
            data_frame["MRP of the product"] == "", "MRP of the product"
        ] = ' '
        data_frame.loc[
            data_frame["Discount"].isna(), "Discount"
        ] = ' '
        data_frame.index += 1
return data_frame
if __name__ == "__main__":
UpperCamelCase : Any = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 690 | 1 |
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case ( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config( self , **__A ):
        config = {
'num_train_timesteps': 1_0_0_0,
'variance_type': 'fixed_small_log',
'clip_sample': True,
'clip_sample_range': 1.0,
'prediction_type': 'epsilon',
}
config.update(**__A )
return config
def _lowerCamelCase ( self : str ):
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__A )
def _lowerCamelCase ( self : str ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__A )
def _lowerCamelCase ( self : Optional[Any] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__A )
def _lowerCamelCase ( self : int ):
for clip_sample_range in [1, 5, 1_0, 2_0]:
self.check_over_configs(clip_sample_range=__A )
def _lowerCamelCase ( self : Union[str, Any] ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__A )
def _lowerCamelCase ( self : List[Any] ):
for time_step in [0, 5_0_0, 9_9_9]:
for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__A , prev_timestep=__A )
def _lowerCamelCase ( self : List[Any] ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='fixed_small_log' )
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.054_9625 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.999_4987 ) ) < 1e-5
def _lowerCamelCase ( self : Dict ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='learned_range' )
        scheduler = scheduler_class(**scheduler_config )
        predicted_variance = 0.5
        assert scheduler._get_variance(1 , predicted_variance=predicted_variance ) - -10.171_2790 < 1e-5
        assert scheduler._get_variance(4_8_7 , predicted_variance=predicted_variance ) - -5.799_8052 < 1e-5
        assert scheduler._get_variance(9_9_9 , predicted_variance=predicted_variance ) - -0.001_0011 < 1e-5
def _lowerCamelCase ( self : str ):
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config()
__UpperCamelCase = scheduler_class(**__A )
__UpperCamelCase = scheduler.timesteps
__UpperCamelCase = self.dummy_model()
__UpperCamelCase = self.dummy_sample_deter
__UpperCamelCase = torch.manual_seed(0 )
for i, t in enumerate(__A ):
# 1. predict noise residual
__UpperCamelCase = model(__A , __A )
# 2. predict previous mean of sample x_t-1
__UpperCamelCase = scheduler.step(__A , __A , __A , generator=__A ).prev_sample
__UpperCamelCase = pred_prev_sample
__UpperCamelCase = torch.sum(torch.abs(__A ) )
__UpperCamelCase = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1e-2
assert abs(result_mean.item() - 0.328_4743 ) < 1e-3
def _lowerCamelCase ( self : Optional[Any] ):
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config()
__UpperCamelCase = scheduler_class(**__A )
scheduler.set_timesteps(2_5 )
__UpperCamelCase = scheduler.timesteps
__UpperCamelCase = self.dummy_model()
__UpperCamelCase = self.dummy_sample_deter
__UpperCamelCase = torch.manual_seed(0 )
for i, t in enumerate(__A ):
# 1. predict noise residual
__UpperCamelCase = model(__A , __A )
if i + 1 == timesteps.shape[0]:
__UpperCamelCase = None
else:
__UpperCamelCase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__UpperCamelCase = scheduler.step(
__A , __A , __A , prev_timestep=__A , generator=__A ).prev_sample
__UpperCamelCase = pred_prev_sample
__UpperCamelCase = torch.sum(torch.abs(__A ) )
__UpperCamelCase = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1e-2
assert abs(result_mean.item() - 0.336_2038 ) < 1e-3
def _lowerCamelCase ( self : Optional[Any] ):
pass
def _lowerCamelCase ( self : Any ):
pass
| 707 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list( split_dict : SplitDict ) -> None:
    """simple docstring"""
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list ) == len(split_dict )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    'split_info' , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name='my_dataset' )] )
def test_split_dict_asdict_has_dataset_name( split_info : SplitInfo ) -> None:
    """simple docstring"""
    split_dict_asdict = asdict(SplitDict({'train': split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 434 | 0 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def _UpperCAmelCase ( A , A , A = "x" , A = 10**-10 , A = 1 , ):
'''simple docstring'''
UpperCAmelCase__ =symbols(_snake_case )
UpperCAmelCase__ =lambdify(_snake_case , _snake_case )
UpperCAmelCase__ =lambdify(_snake_case , diff(_snake_case , _snake_case ) )
UpperCAmelCase__ =starting_point
while True:
if diff_function(_snake_case ) != 0:
UpperCAmelCase__ =prev_guess - multiplicity * func(_snake_case ) / diff_function(
_snake_case )
else:
raise ZeroDivisionError("Could not find root" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
UpperCAmelCase__ =next_guess
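# The update in the loop above is the (modified) Newton-Raphson iteration
# x_{n+1} = x_n - m * f(x_n) / f'(x_n), where m is the multiplicity of the root;
# m = 1 recovers the classic method.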
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
# Find value of e
print(
'The root of log(y) - 1 = 0 is ',
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
'The root of exp(x) - 1 = 0 is',
f"""{newton_raphson('exp(x) - 1', 10, precision=0.0_0_5)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 625 | """simple docstring"""
def snake_case__ ( number : int ):
    """simple docstring"""
    if number > 0:
        raise ValueError("input must be a negative integer" )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 516 | 0 |
'''simple docstring'''
import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    """Integrate y' = f(x, y) from x0 to x_end with step size h, returning the array of y values."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
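# A quick sanity check (sketch): integrating y' = y with y(0) = 1 over [0, 1]
# should give y(1) close to e ~ 2.71828.
#
#     approx_e = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1]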
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717 |
'''Deformable DETR model configuration.'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
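# A minimal usage sketch (hypothetical values; `two_stage=True` requires
# `with_box_refine=True`, as enforced in `__init__` above):
#
#     config = DeformableDetrConfig(with_box_refine=True, two_stage=True)
#     assert config.num_attention_heads == config.encoder_attention_heads  # via `attribute_map`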
| 347 | 0 |
'''Audio feature for the `datasets` library: stores samples as {"bytes", "path"} structs and decodes them to arrays.'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_ffmpeg_warned, _librosa_warned, _audioread_warned = False, False, False


@dataclass
class Audio:
    """Audio feature to extract audio data from an audio file."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        """Encode `value` (a path, raw bytes, or an array dict) into a storable {"bytes", "path"} dict."""
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, if you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM bytes, there is no need to read the file again (just use them!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767

                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(
        self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
    ) -> dict:
        """Decode an encoded example into {"path", "array", "sampling_rate"}, downloading from the Hub if needed."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Raise if the feature is decodable, otherwise flatten it into its storage fields."""
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        """Cast an Arrow array to the Audio arrow storage type ({"bytes": binary, "path": string})."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed the audio file bytes into the Arrow storage, keeping only the file basename as path."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
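# A minimal usage sketch (the "example.wav" file name is a placeholder):
#
#     feature = Audio(sampling_rate=16_000)
#     encoded = feature.encode_example("example.wav")  # {"bytes": None, "path": "example.wav"}
#     decoded = feature.decode_example(encoded)        # {"path": ..., "array": ..., "sampling_rate": 16000}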
| 38 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str) -> MaskFormerConfig:
    """Build a MaskFormer config with a Swin-tiny backbone and the id2label mapping for the given dataset."""
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config: MaskFormerConfig) -> list:
    """Build the list of (old_key, new_key) pairs mapping original MaskFormer weights to HF names."""
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 ,0 ,-1 ) ,range(0 ,3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct: dict, old: str, new: str) -> None:
    """Pop `old` from the state dict and re-insert its value under `new`."""
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    """Split the fused Swin qkv projections into separate query/key/value entries."""
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    """Split the fused decoder attention in_proj matrices into q/k/v projections."""
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight_cross_attn = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-hidden_size:]
    # fmt: on
def prepare_img():
    """Download the standard COCO cats image used to sanity-check the converted model."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(
    model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    """Copy/paste/tweak the original model's weights into our MaskFormer structure and verify the outputs."""
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
    assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''maskformer-swin-tiny-ade''',
type=str,
        help="Name of the MaskFormer model you'd like to convert.",
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''',
type=str,
help='''Path to the original state dict (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
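# Example invocation (sketch; the script name and paths are placeholders):
#
#     python convert_maskformer_checkpoint.py \
#         --model_name maskformer-swin-tiny-ade \
#         --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#         --pytorch_dump_folder_path /path/to/output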
| 14 | 0 |
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase="cpu" ) -> int:
'''simple docstring'''
lowercase_ = model_dict[model_name].from_pretrained(__lowerCAmelCase ).to(__lowerCAmelCase )
lowercase_ = tokenizer_dict[model_name].from_pretrained(__lowerCAmelCase )
if model_name in ["facebook/bart-base"]:
lowercase_ = 0
lowercase_ = None
lowercase_ = 0
return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    """Export the scripted beam-search generator to ONNX and check it matches the PyTorch output."""
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)

    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
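# Example invocation (sketch; the script name and output path are placeholders):
#
#     python export_bart_onnx.py \
#         --model_name_or_path facebook/bart-base \
#         --num_beams 4 --max_length 5 \
#         --output_file_path bart.onnx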
| 716 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 100 | 0 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    """Map the original LDM VAE state dict onto the diffusers `AutoencoderKL` layout."""
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(
    checkpoint_path: str,
    output_path: str,
):
    """Convert a VAE .pt/.safetensors checkpoint and save it as a diffusers AutoencoderKL."""
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
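# Example invocation (sketch; the script name and paths are placeholders):
#
#     python convert_vae_pt_to_diffusers.py \
#         --vae_pt_path /path/to/vae.pt \
#         --dump_path /path/to/converted_vae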
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to write the converted diffusers VAE to.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 156 |
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
    from .utils import rich
| 156 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701 |
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
):
    """Return the largest eigenvalue and corresponding eigenvector of `input_matrix` via power iteration."""
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
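# A quick illustration (sketch): for a diagonal matrix the dominant eigenvalue is
# simply its largest diagonal entry.
#
#     value, _ = power_iteration(np.diag([1.0, 2.0, 3.0]), np.array([1.0, 1.0, 1.0]))
#     # value ~ 3.0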
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique up to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 138 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
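# A minimal usage sketch (hypothetical values):
#
#     config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
#     config.attention_type = "original_full"  # fall back to full attention for short sequences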
| 80 |
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        """Load an ONNX inference session, defaulting to the CPU execution provider."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id,
        use_auth_token=None,
        revision=None,
        force_download=False,
        cache_dir=None,
        file_name=None,
        provider=None,
        sess_options=None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id,
        force_download=True,
        use_auth_token=None,
        cache_dir=None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
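# A minimal usage sketch (the model id is a placeholder):
#
#     model = OnnxRuntimeModel.from_pretrained("org/some-onnx-model", provider="CPUExecutionProvider")
#     outputs = model(input_ids=input_ids, attention_mask=attention_mask)  # numpy arrays in, list out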
| 532 | 0 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))
def init_hf_modules():
    """Create the cache directory for dynamic modules with an init, and add it to the Python path."""
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name: Union[str, os.PathLike]):
    """Create a dynamic module in the cache directory for modules."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    """Get the list of modules that are relatively imported in `module_file`."""
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))
        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]
        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)
    return all_relative_imports
def check_imports(filename):
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]
    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)
    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`")
    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)
    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    # Retrieve the pipeline class: the only class in the module that inherits from
    # `DiffusionPipeline`, is not `DiffusionPipeline` itself, and lives outside `diffusers`.
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}.")
            pipeline_class = cls
    return pipeline_class
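# --- Illustrative sketch, not part of the original file: how `find_pipeline_class`
# resolves the single user-defined pipeline in an imported community module. The
# module and class names are hypothetical.
def _example_find_pipeline_class():
    import types

    from ..pipelines import DiffusionPipeline

    fake_module = types.ModuleType("my_community_pipeline")

    class MyPipeline(DiffusionPipeline):  # the one user-defined pipeline
        pass

    # Pretend the class was defined outside the `diffusers` package.
    MyPipeline.__module__ = "my_community_pipeline"
    fake_module.MyPipeline = MyPipeline
    return find_pipeline_class(fake_module)  # -> MyPipeline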
def get_cached_module_file(
    pretrained_model_name_or_path , module_file , cache_dir=None , force_download=False , resume_download=False , proxies=None , use_auth_token=None , revision=None , local_files_only=False , ):
    # Download and cache `module_file` from the repo `pretrained_model_name_or_path`,
    # or grab it from a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}.")
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=False, )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path, f"{module_needed}.py", cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path , module_file , class_name=None , cache_dir=None , force_download=False , resume_download=False , proxies=None , use_auth_token=None , revision=None , local_files_only=False , **kwargs , ):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
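# --- Illustrative usage sketch, not part of the original file. "lpw_stable_diffusion"
# is a real community pipeline name, but treat the exact arguments as an assumption;
# with `class_name=None` the class is resolved via `find_pipeline_class`.
def _example_load_community_pipeline_class():
    return get_class_from_dynamic_module(
        "lpw_stable_diffusion",
        module_file="lpw_stable_diffusion.py",
    )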
| 212 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_detectron2,
    require_pytesseract,
    require_tf,
    require_torch,
    require_vision,
    slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor)
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"
        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        # No text is detected in this image, so LayoutLMv2 has nothing to answer from
        # and should return an empty result.
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])
        # We can optionally pass the words and bounding boxes directly
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt(self):
A__ = pipeline(
'''document-question-answering''',model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''',revision='''9977165''',)
A__ = INVOICE_URL
A__ = '''What is the invoice number?'''
A__ = dqa_pipeline(image=__lowerCamelCase,question=__lowerCamelCase,top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase,decimals=4 ),[
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],)
A__ = dqa_pipeline({'''image''': image, '''question''': question},top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase,decimals=4 ),[
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],)
A__ = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}],top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase,decimals=4 ),[
[
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2,)
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk(self):
A__ = pipeline(
'''document-question-answering''',model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''',revision='''9977165''',max_seq_len=50,)
A__ = INVOICE_URL
A__ = '''What is the invoice number?'''
A__ = dqa_pipeline(image=__lowerCamelCase,question=__lowerCamelCase,top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase,decimals=4 ),[
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],)
A__ = dqa_pipeline({'''image''': image, '''question''': question},top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase,decimals=4 ),[
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],)
A__ = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}],top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase,decimals=4 ),[
[
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2,)
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self):
A__ = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''',revision='''3dc6de3''',add_prefix_space=__lowerCamelCase )
A__ = pipeline(
'''document-question-answering''',model='''impira/layoutlm-document-qa''',tokenizer=__lowerCamelCase,revision='''3dc6de3''',)
A__ = INVOICE_URL
A__ = '''What is the invoice number?'''
A__ = dqa_pipeline(image=__lowerCamelCase,question=__lowerCamelCase,top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase,decimals=4 ),[
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
],)
A__ = dqa_pipeline({'''image''': image, '''question''': question},top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase,decimals=4 ),[
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
],)
A__ = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}],top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase,decimals=4 ),[
[
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2,)
A__ = list(zip(*apply_tesseract(load_image(__lowerCamelCase ),__lowerCamelCase,'''''' ) ) )
# This model should also work if `image` is set to None
A__ = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question},top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase,decimals=4 ),[
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
],)
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self):
A__ = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''',revision='''3dc6de3''',add_prefix_space=__lowerCamelCase )
A__ = pipeline(
'''document-question-answering''',model='''impira/layoutlm-document-qa''',tokenizer=__lowerCamelCase,revision='''3dc6de3''',max_seq_len=50,)
A__ = INVOICE_URL
A__ = '''What is the invoice number?'''
A__ = dqa_pipeline(image=__lowerCamelCase,question=__lowerCamelCase,top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase,decimals=4 ),[
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],)
A__ = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}],top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase,decimals=4 ),[
[
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2,)
A__ = list(zip(*apply_tesseract(load_image(__lowerCamelCase ),__lowerCamelCase,'''''' ) ) )
# This model should also work if `image` is set to None
A__ = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question},top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase,decimals=4 ),[
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],)
@slow
@require_torch
    def test_large_model_pt_donut(self):
A__ = pipeline(
'''document-question-answering''',model='''naver-clova-ix/donut-base-finetuned-docvqa''',tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ),feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''',)
A__ = INVOICE_URL
A__ = '''What is the invoice number?'''
A__ = dqa_pipeline(image=__lowerCamelCase,question=__lowerCamelCase,top_k=2 )
self.assertEqual(nested_simplify(__lowerCamelCase,decimals=4 ),[{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
    def test_small_model_tf(self):
pass
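# --- Illustrative usage sketch, not part of the test file. The checkpoint below is
# one the slow tests above exercise; OCR via pytesseract is required when passing a
# raw image URL like this.
def _example_document_qa():
    dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
    return dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=1)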
| 212 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_model_names = [
"""small""",
"""small-base""",
"""medium""",
"""medium-base""",
"""intermediate""",
"""intermediate-base""",
"""large""",
"""large-base""",
"""xlarge""",
"""xlarge-base""",
]
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json""",
"""funnel-transformer/small-base""": (
"""https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json""",
"""funnel-transformer/large-base""": (
"""https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {F"""funnel-transformer/{name}""": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {F"""funnel-transformer/{name}""": {"""do_lower_case""": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , bos_token="<s>" , eos_token="</s>" , clean_text=True , tokenize_chinese_chars=True , strip_accents=None , wordpieces_prefix="##" , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , bos_token=bos_token , eos_token=eos_token , clean_text=clean_text , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , wordpieces_prefix=wordpieces_prefix , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary( self , save_directory , filename_prefix=None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
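# --- Illustrative sketch, not part of the original file: Funnel marks the [CLS]
# position with token type id `cls_token_type_id == 2`, so a single sequence of n
# ids yields [2] + [0] * (n + 1). The toy ids are arbitrary.
def _example_funnel_token_type_ids():
    tok = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
    return tok.create_token_type_ids_from_sequences([5, 6, 7])  # -> [2, 0, 0, 0, 0]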
| 566 |
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    """simple docstring"""
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    """simple docstring"""
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])
if __name__ == "__main__":
write_movies()
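# --- Illustrative sketch, not part of the original file: print the ten highest
# rated titles from the scraped dictionary.
def print_top_ten() -> None:
    movies = get_imdb_top_250_movies()
    for title, rating in sorted(movies.items(), key=lambda kv: -kv[1])[:10]:
        print(f"{rating:.1f}  {title}")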
| 218 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    pipeline_class = ShapEImg2ImgPipeline
    params = ["""image"""]
    batch_params = ["""image"""]
    required_optional_params = [
        """num_images_per_prompt""",
        """num_inference_steps""",
        """generator""",
        """latents""",
        """guidance_scale""",
        """frame_size""",
        """output_type""",
        """return_dict""",
    ]
    test_gpu_offload = False
@property
    def text_embedder_hidden_size( self ):
return 3_2
@property
    def time_input_dim( self ):
return 3_2
@property
    def time_embed_dim( self ):
return self.time_input_dim * 4
@property
    def renderer_dim( self ):
return 8
@property
    def dummy_image_encoder( self ):
torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=6_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
        model = CLIPVisionModel(config )
return model
@property
    def dummy_image_processor( self ):
        image_processor = CLIPImageProcessor(
            crop_size=2_2_4 , do_center_crop=True , do_normalize=True , do_resize=True , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=2_2_4 , )
return image_processor
@property
    def dummy_prior( self ):
torch.manual_seed(0 )
        model_kwargs = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 1_6,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 3_2,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
        model = PriorTransformer(**model_kwargs )
return model
@property
    def dummy_renderer( self ):
torch.manual_seed(0 )
        model_kwargs = {
'''param_shapes''': (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 1_2,
'''background''': (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs )
return model
    def get_dummy_components( self ):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='''exp''' , num_train_timesteps=1_0_2_4 , prediction_type='''sample''' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        input_image = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 3_2,
'''output_type''': '''np''',
}
return inputs
    def test_shap_e_img2img( self ):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
        expected_slice = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_consistent( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical( self ):
        test_max_difference = torch_device == '''cpu'''
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
    def test_num_images_per_prompt( self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e_img2img( self ):
        input_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/shap_e/test_shap_e_img2img_out.npy''' )
        pipe = ShapEImg2ImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type='''np''' , ).images[0]
        assert images.shape == (2_0, 6_4, 6_4, 3)
        assert_mean_pixel_difference(images , expected_image )
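# --- Illustrative sketch, not part of the test file: the seeding pattern the tests
# above rely on for reproducible generations; the device string is an example.
def _example_seeded_generator(device="cpu", seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # mps does not support device-bound generators
    return torch.Generator(device=device).manual_seed(seed)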
| 716 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class AcceleratedScheduler:
'''simple docstring'''
    def __init__( self , scheduler , optimizers , step_with_optimizer=True , split_batches=False ):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()
    def step( self , *args , **kwargs ):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)
    def get_last_lr( self ):
        return self.scheduler.get_last_lr()

    def state_dict( self ):
        return self.scheduler.state_dict()

    def load_state_dict( self , state_dict ):
        self.scheduler.load_state_dict(state_dict)

    def get_lr( self ):
        return self.scheduler.get_lr()

    def print_lr( self , *args , **kwargs ):
        return self.scheduler.print_lr(*args, **kwargs)
| 619 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class LanguageModeling(TaskTemplate ):
    """simple docstring"""
    task: str = field(default="language-modeling" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"text": Value("string" )} )
    label_schema: ClassVar[Features] = Features({} )
    text_column: str = "text"

    @property
    def column_mapping( self ) -> Dict[str, str]:
        '''simple docstring'''
        return {self.text_column: "text"}
| 659 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
| 659 | 1 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(F'{price_plus_tax(100, 0.25) = }')
print(F'{price_plus_tax(125.50, 0.05) = }')
| 700 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase ):
    """simple docstring"""
    def test_gelu_versions( self ):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation("gelu" )
        self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
        self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )

    def test_gelu_10( self ):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation("gelu" )
        gelu10 = get_activation("gelu_10" )
        y_gelu = torch_builtin(x )
        y_gelu_10 = gelu10(x )
        clipped_mask = torch.where(y_gelu_10 < 10.0 , 1 , 0 )
        self.assertTrue(torch.max(y_gelu_10 ).item() == 10.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_10 * clipped_mask ) )

    def test_get_activation( self ):
        get_activation("gelu" )
        get_activation("gelu_10" )
        get_activation("gelu_fast" )
        get_activation("gelu_new" )
        get_activation("gelu_python" )
        get_activation("gelu_pytorch_tanh" )
        get_activation("linear" )
        get_activation("mish" )
        get_activation("quick_gelu" )
        get_activation("relu" )
        get_activation("sigmoid" )
        get_activation("silu" )
        get_activation("swish" )
        get_activation("tanh" )
        with self.assertRaises(KeyError ):
            get_activation("bogus" )
        with self.assertRaises(KeyError ):
            get_activation(None )

    def test_activations_are_distinct_objects( self ):
        act1 = get_activation("gelu" )
        act1.a = 1
        act2 = get_activation("gelu" )
        self.assertEqual(act1.a , 1 )
        with self.assertRaises(AttributeError ):
            _ = act2.a
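# --- Illustrative sketch, not part of the test file: the Python-level GELU the tests
# compare against, written out from its standard erf-based definition.
def _example_gelu(x):
    import math

    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))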
| 284 | 0 |
def solution(limit: int = 100_0000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(F"""{solution() = }""")
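# --- Illustrative brute-force cross-check, not part of the original solution: count
# n < limit with exactly `target` progressions x > y > z > 0 where x, y, z are in
# arithmetic progression and x*x - y*y - z*z == n. Useful for validating small limits.
def brute_force_solution(limit: int = 100, target: int = 10) -> int:
    frequency = [0] * limit
    for y in range(1, limit):  # middle term of the progression
        for d in range(1, y):  # x = y + d, z = y - d, all positive
            n = (y + d) ** 2 - y * y - (y - d) ** 2  # equals y * (4 * d - y)
            if 0 < n < limit:
                frequency[n] += 1
    return sum(1 for count in frequency[1:] if count == target)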
| 53 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed)
    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator, datasets, train_idxs, valid_idxs, )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions, references=references, )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)
        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
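# --- Illustrative sketch, not part of the original example: the fold-averaging
# ("soft voting") used above, reduced to plain tensors. Averaging logits, or summing
# then dividing by the fold count, pick the same argmax.
def _example_soft_vote(fold_logits):
    # fold_logits: list of (num_examples, num_labels) tensors, one per fold
    return torch.stack(fold_logits, dim=0).mean(dim=0).argmax(dim=-1)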
| 416 | 0 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
UpperCAmelCase_ = logging.getLogger(__name__)
@dataclass
class InputExample:
    """simple docstring"""
    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """simple docstring"""
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split(Enum):
    """simple docstring"""
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    """simple docstring"""
    @staticmethod
    def read_examples_from_file( data_dir , mode ):
        """simple docstring"""
        raise NotImplementedError

    @staticmethod
    def get_labels( path ):
        """simple docstring"""
        raise NotImplementedError
    @staticmethod
    def convert_examples_to_features( examples , label_list , max_seq_length , tokenizer , cls_token_at_end=False , cls_token="[CLS]" , cls_token_segment_id=1 , sep_token="[SEP]" , sep_token_extra=False , pad_on_left=False , pad_token=0 , pad_token_segment_id=0 , pad_token_label_id=-100 , sequence_a_segment_id=0 , mask_padding_with_zero=True , ):
        """simple docstring"""
        label_map = {label: i for i, label in enumerate(label_list)}
        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 1_00_00 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))
            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)
                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length
            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None
            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids))
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class TokenClassificationDataset(Dataset ):
    """simple docstring"""
    features: List[InputFeatures]
    pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

    def __init__( self , token_classification_task , data_dir , tokenizer , labels , model_type , max_seq_length=None , overwrite_cache=False , mode=Split.train , ):
        """simple docstring"""
        cached_features_file = os.path.join(
            data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)), )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                logger.info(f'''Loading features from cached file {cached_features_file}''')
                self.features = torch.load(cached_features_file)
            else:
                logger.info(f'''Creating features from dataset file at {data_dir}''')
                examples = token_classification_task.read_examples_from_file(data_dir, mode)
                # TODO clean up all this to leverage built-in features of tokenizers
                self.features = token_classification_task.convert_examples_to_features(
                    examples , labels , max_seq_length , tokenizer , cls_token_at_end=bool(model_type in ["xlnet"]) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=bool(model_type in ["roberta"]) , pad_on_left=bool(tokenizer.padding_side == "left") , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                logger.info(f'''Saving features into cached file {cached_features_file}''')
                torch.save(self.features, cached_features_file)

    def __len__( self ):
        """simple docstring"""
        return len(self.features)

    def __getitem__( self , i ):
        """simple docstring"""
        return self.features[i]
if is_tf_available():
import tensorflow as tf
class TFTokenClassificationDataset:
    """simple docstring"""

    features: List[InputFeatures]
    pad_token_label_id: int = -100
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__ = Split.train , ):
"""simple docstring"""
_snake_case : Union[str, Any] = token_classification_task.read_examples_from_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# TODO clean up all this to leverage built-in features of tokenizers
self.features = token_classification_task.convert_examples_to_features(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=SCREAMING_SNAKE_CASE__ , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
self.dataset = tf.data.Dataset.from_generator(
    gen , ({"""input_ids""": tf.int32, """attention_mask""": tf.int32}, tf.int64) , (
{"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
self.dataset = tf.data.Dataset.from_generator(
    gen , ({"""input_ids""": tf.int32, """attention_mask""": tf.int32, """token_type_ids""": tf.int32}, tf.int64) , (
{
"""input_ids""": tf.TensorShape([None] ),
"""attention_mask""": tf.TensorShape([None] ),
"""token_type_ids""": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def __lowerCamelCase( self ):
"""simple docstring"""
self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
return self.features[i]
| 708 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
"""simple docstring"""
@register_to_config
    def __init__(self, embedding_dim: int = 768, ):
        """simple docstring"""
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))
    def to(self, torch_device=None, torch_dtype=None, ):
        """simple docstring"""
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        """simple docstring"""
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        """simple docstring"""
        embeds = (embeds * self.std) + self.mean
        return embeds
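# A minimal round-trip sketch (not part of the original file; the instantiation
# below is an assumption based on the signature above). scale() and unscale()
# are exact inverses, so unscale(scale(x)) recovers x:
#   normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
#   x = torch.randn(2, 768)
#   assert torch.allclose(normalizer.unscale(normalizer.scale(x)), x, atol=1e-5)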
| 519 | 0 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """simple docstring"""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(f'Total count for various states are: {single_qubit_measure(1, 1)}')
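# Since no gates are applied before measurement, the qubit stays in |0> and all
# 1000 shots should read 0, e.g. single_qubit_measure(1, 1) -> {'0': 1000}.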
| 59 |
"""simple docstring"""
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)
    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(',')]
    print(iter_merge_sort(unsorted))
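# Sample run (hypothetical input): entering "5,3,1,4" prints [1, 3, 4, 5].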
| 224 | 0 |
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    # a valid IPv4 address has exactly four octets, each in the range 0-255
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f'''{ip} is a {valid_or_invalid} IP v4 address.''')
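# Examples: "192.168.0.1" is valid; "256.1.1.1" (octet > 255) and "1.2.3"
# (only three octets) are invalid.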
| 700 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    """simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        '''simple docstring'''
        return MegatronBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
def snake_case__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCamelCase__ = MegatronBertModel(config=snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase__ = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
UpperCamelCase__ = model(snake_case , token_type_ids=snake_case )
UpperCamelCase__ = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def snake_case__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCamelCase__ = MegatronBertForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase__ = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCamelCase__ = MegatronBertForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase__ = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCamelCase__ = MegatronBertForNextSentencePrediction(config=snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase__ = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def snake_case__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCamelCase__ = MegatronBertForPreTraining(config=snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase__ = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , next_sentence_label=snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def snake_case__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCamelCase__ = MegatronBertForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase__ = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , start_positions=snake_case , end_positions=snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = MegatronBertForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase__ = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = MegatronBertForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase__ = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCamelCase__ = self.num_choices
UpperCamelCase__ = MegatronBertForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : int = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
_UpperCamelCase : int = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : Optional[Any] = True
# test_resize_embeddings = False
_UpperCamelCase : List[Any] = False
def snake_case__ ( self , snake_case , snake_case , snake_case=False ):
'''simple docstring'''
UpperCamelCase__ = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
if return_labels:
if model_class in get_values(snake_case ):
UpperCamelCase__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case )
UpperCamelCase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case )
return inputs_dict
    def setUp(self):
        '''simple docstring'''
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)
def snake_case__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*snake_case )
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*snake_case )
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*snake_case )
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*snake_case )
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*snake_case )
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*snake_case )
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*snake_case )
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*snake_case )
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )


TOLERANCE = 1E-4
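# e.g. _long_tensor([[101, 102]]) -> tensor([[101, 102]]) with dtype torch.int64,
# placed on whichever `torch_device` the test utilities selected.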
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip("Model is not available." )
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = "nvidia/megatron-bert-uncased-345m"
if "MYDIR" in os.environ:
UpperCamelCase__ = os.path.join(os.environ["MYDIR"] , snake_case )
UpperCamelCase__ = MegatronBertModel.from_pretrained(snake_case )
model.to(snake_case )
model.half()
UpperCamelCase__ = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
with torch.no_grad():
UpperCamelCase__ = model(snake_case )[0]
UpperCamelCase__ = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape , snake_case )
UpperCamelCase__ = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
UpperCamelCase__ = output[0, ii, jj]
UpperCamelCase__ = expected[3 * ii + jj]
UpperCamelCase__ = "ii={} jj={} a={} b={}".format(snake_case , snake_case , snake_case , snake_case )
self.assertTrue(math.isclose(snake_case , snake_case , rel_tol=snake_case , abs_tol=snake_case ) , msg=snake_case )
| 185 | 0 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    """simple docstring"""
    t = int(t)
    h, m, s = t // 3_600, (t // 60) % 60, t % 60
    return F"""{h}:{m:02d}:{s:02d}""" if h != 0 else F"""{m:02d}:{s:02d}"""
def html_progress_bar(value, total, prefix, label, width=300):
    """simple docstring"""
return F"""
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
"""
def text_to_html_table(items):
    """simple docstring"""
    html_code = '<table border="1" class="dataframe">\n'
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F""" <th>{i}</th>\n"""
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
lowercase__ : Any = F"""{elt:.6f}""" if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else str(lowerCamelCase_ )
html_code += F""" <td>{elt}</td>\n"""
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
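# e.g. text_to_html_table([["Step", "Loss"], [10, 0.5]]) renders an HTML table
# with a "Step"/"Loss" header row and one body row containing "10" and "0.500000".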
class NotebookProgressBar:
    """simple docstring"""

    warmup = 5
    update_every = 0.2

    def __init__(self, total: int, prefix: Optional[str] = None, leave: bool = True, parent: Optional["NotebookTrainingTracker"] = None, width: int = 300, ):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update(self, value: int, force_update: bool = False, comment: str = None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)
    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"""[{spaced_value}/{self.total} : < :"""
        elif self.predicted_remaining is None:
            self.label = f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )}"""
        else:
            self.label = (
                f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <"""
                f""" {format_time(self.predicted_remaining )}"""
            )
            self.label += f""", {1/self.average_time_per_item:.2f} it/s"""
        self.label += "]" if self.comment is None or len(self.comment ) == 0 else f""", {self.comment}]"""
        self.display()
    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))
    def close(self):
if self.parent is None and self.output is not None:
self.output.update(disp.HTML("" ) )
class NotebookTrainingTracker(NotebookProgressBar):
    """simple docstring"""

    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None
    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))
    def write_line(self, values):
        """simple docstring"""
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])
    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar
    def remove_child(self):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    """simple docstring"""

    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False
    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)
    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"""{state.epoch:.2f}"""
        self.training_tracker.update(
            state.global_step + 1, comment=f"""Epoch {epoch}/{state.num_train_epochs}""", force_update=self._force_next_update, )
        self._force_next_update = False
    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)
    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None
    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)
    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break
            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"""{metric_key_prefix}_runtime""", None)
            _ = metrics.pop(f"""{metric_key_prefix}_samples_per_second""", None)
            _ = metrics.pop(f"""{metric_key_prefix}_steps_per_second""", None)
            _ = metrics.pop(f"""{metric_key_prefix}_jit_compilation_time""", None)
            for k, v in metrics.items():
                if k == f"""{metric_key_prefix}_loss""":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True
    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step, comment=f"""Epoch {int(state.epoch )}/{state.num_train_epochs}""", force_update=True)
        self.training_tracker = None
| 496 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> Any:
n = 10
features = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
dataset = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(n ) ),
} , features=features , )
return dataset
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> int:
filename = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=filename )
return filename
# FILE_CONTENT + files
SCREAMING_SNAKE_CASE : str = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'file.txt'
_lowercase : List[str] = FILE_CONTENT
with open(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ )
return filename
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Tuple:
import bz2
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
_lowercase : Optional[Any] = bytes(lowerCamelCase_ , 'utf-8' )
with bz2.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
import gzip
_lowercase : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
_lowercase : Optional[int] = bytes(lowerCamelCase_ , 'utf-8' )
with gzip.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
if datasets.config.LZ4_AVAILABLE:
import lz4.frame
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
_lowercase : Optional[Any] = bytes(lowerCamelCase_ , 'utf-8' )
with lz4.frame.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> str:
if datasets.config.PY7ZR_AVAILABLE:
import py7zr
_lowercase : int = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with py7zr.SevenZipFile(lowerCamelCase_ , 'w' ) as archive:
archive.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
import tarfile
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
import lzma
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
_lowercase : int = bytes(lowerCamelCase_ , 'utf-8' )
with lzma.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> str:
import zipfile
_lowercase : Dict = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
_lowercase : Dict = bytes(lowerCamelCase_ , 'utf-8' )
with zstd.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
_lowercase : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.xml'
_lowercase : Optional[Any] = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ )
return filename
DATA = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> List[str]:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : Optional[int] = datasets.Dataset.from_dict(lowerCamelCase_ )
_lowercase : List[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
path = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlite3.connect(path ) ) as con:
    cur = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
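# Hypothetical read-back check (not part of the original fixtures), showing the
# schema this fixture creates:
#   with contextlib.closing(sqlite3.connect(path)) as con:
#       rows = con.execute("SELECT col_1, col_2, col_3 FROM dataset").fetchall()
#       assert len(rows) == len(DATA)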
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(lowerCamelCase_ , 'w' , newline='' ) as f:
writer = csv.DictWriter(f , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
_lowercase : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(lowerCamelCase_ , 'w' , newline='' ) as f:
writer = csv.DictWriter(f , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Any:
import bz2
_lowercase : int = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(lowerCamelCase_ , 'rb' ) as f:
_lowercase : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bz2.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
_lowercase : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> int:
path = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
schema = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.int64(),
'col_3': pa.float64(),
} )
with open(path , 'wb' ) as f:
    writer = pq.ParquetWriter(f , schema=schema )
    pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA ) )] for k in DATA[0]} , schema=schema )
    writer.write_table(pa_table )
    writer.close()
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
_lowercase : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
_lowercase : List[Any] = {'data': DATA}
with open(lowerCamelCase_ , 'w' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
_lowercase : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
_lowercase : Optional[Any] = {'data': DATA_DICT_OF_LISTS}
with open(lowerCamelCase_ , 'w' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Union[str, Any]:
_lowercase : Any = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[str]:
_lowercase : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
_lowercase : str = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
import gzip
_lowercase : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(lowerCamelCase_ , 'rb' ) as orig_file:
with gzip.open(lowerCamelCase_ , 'wb' ) as zipped_file:
zipped_file.writelines(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
import gzip
_lowercase : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(lowerCamelCase_ , 'rb' ) as orig_file:
with gzip.open(lowerCamelCase_ , 'wb' ) as zipped_file:
zipped_file.writelines(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('nested' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
_lowercase : Tuple = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.join('nested' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
_lowercase : Optional[int] = ['0', '1', '2', '3']
_lowercase : str = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Union[str, Any]:
_lowercase : str = ['0', '1', '2', '3']
_lowercase : List[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[str]:
_lowercase : List[Any] = ['0', '1', '2', '3']
_lowercase : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(lowerCamelCase_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
_lowercase : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str:
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename('unsupported.ext' ) )
f.write(lowerCamelCase_ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : List[str] = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
_lowercase : Any = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> Dict:
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> int:
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Any:
_lowercase : Dict = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
_lowercase : str = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
| 89 | 0 |
'''simple docstring'''
from __future__ import annotations
def minimum_cost_path(matrix: list[list[int]]) -> int:
    '''simple docstring'''
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
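# Worked example: minimum_cost_path([[2, 1], [3, 1], [4, 2]]) returns 6, the
# cost of the cheapest right/down path 2 -> 1 -> 1 -> 2.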
if __name__ == "__main__":
import doctest
doctest.testmod()
| 539 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=100 ,UpperCAmelCase_=13 ,UpperCAmelCase_=30 ,UpperCAmelCase_=2 ,UpperCAmelCase_=3 ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,UpperCAmelCase_=32 ,UpperCAmelCase_=5 ,UpperCAmelCase_=4 ,UpperCAmelCase_=37 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=10 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=3 ,) -> List[Any]:
lowercase__ = parent
lowercase__ = vocab_size
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = num_patches + 1
def _a ( self ) -> List[str]:
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase__ = BeitConfig(
vocab_size=self.vocab_size ,image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=UpperCAmelCase_ ,initializer_range=self.initializer_range ,)
return config, pixel_values, labels
def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) -> Optional[int]:
lowercase__ = FlaxBeitModel(config=UpperCAmelCase_ )
lowercase__ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) -> Tuple:
lowercase__ = FlaxBeitForMaskedImageModeling(config=UpperCAmelCase_ )
lowercase__ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length - 1, self.vocab_size) )
def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) -> List[str]:
lowercase__ = self.type_sequence_label_size
lowercase__ = FlaxBeitForImageClassification(config=UpperCAmelCase_ )
lowercase__ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase__ = 1
lowercase__ = FlaxBeitForImageClassification(UpperCAmelCase_ )
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(UpperCAmelCase_ )
def _a ( self ) -> int:
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class snake_case (UpperCamelCase , unittest.TestCase ):
lowerCAmelCase__ :Optional[int] = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
def _a ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _a ( self ) -> Optional[Any]:
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCAmelCase_ )
lowercase__ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,UpperCAmelCase_ )
def _a ( self ) -> int:
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase__ = self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ )
lowercase__ = model_class(UpperCAmelCase_ )
@jax.jit
def model_jitted(UpperCAmelCase_ ,**UpperCAmelCase_ ):
return model(pixel_values=UpperCAmelCase_ ,**UpperCAmelCase_ )
with self.subTest("JIT Enabled" ):
lowercase__ = model_jitted(**UpperCAmelCase_ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
lowercase__ = model_jitted(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(len(UpperCAmelCase_ ) ,len(UpperCAmelCase_ ) )
for jitted_output, output in zip(UpperCAmelCase_ ,UpperCAmelCase_ ):
self.assertEqual(jitted_output.shape ,output.shape )
def _a ( self ) -> List[Any]:
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def _a ( self ) -> Union[str, Any]:
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def _a ( self ) -> str:
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
@slow
def _a ( self ) -> Tuple:
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@require_flax
class snake_case (unittest.TestCase ):
@cached_property
def _a ( self ) -> Optional[int]:
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def _a ( self ) -> Union[str, Any]:
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values
        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)
        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 196, 8_192)
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = np.array(
            [[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]])
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1E-2))
@slow
def _a ( self ) -> List[str]:
lowercase__ = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=UpperCAmelCase_ ,return_tensors="np" )
# forward pass
lowercase__ = model(**UpperCAmelCase_ )
lowercase__ = outputs.logits
# verify the logits
lowercase__ = (1, 1_000)
self.assertEqual(logits.shape ,UpperCAmelCase_ )
lowercase__ = np.array([-1.23_85, -1.09_87, -1.01_08] )
self.assertTrue(np.allclose(logits[0, :3] ,UpperCAmelCase_ ,atol=1E-4 ) )
lowercase__ = 281
self.assertEqual(logits.argmax(-1 ).item() ,UpperCAmelCase_ )
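# Note (external fact, not defined in this file): index 281 is the "tabby, tabby cat"
# class in the standard ImageNet-1k label mapping, which is why the expected argmax
# for the COCO cats fixture image above is 281.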
@slow
def _a ( self ) -> Optional[int]:
lowercase__ = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=UpperCAmelCase_ ,return_tensors="np" )
# forward pass
lowercase__ = model(**UpperCAmelCase_ )
lowercase__ = outputs.logits
# verify the logits
lowercase__ = (1, 21_841)
self.assertEqual(logits.shape ,UpperCAmelCase_ )
lowercase__ = np.array([1.68_81, -0.27_87, 0.59_01] )
self.assertTrue(np.allclose(logits[0, :3] ,UpperCAmelCase_ ,atol=1E-4 ) )
lowercase__ = 2_396
self.assertEqual(logits.argmax(-1 ).item() ,UpperCAmelCase_ )
| 539 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_bpe_char.model""")
@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest( TokenizerTesterMixin , unittest.TestCase ):
tokenizer_class = SpeechTaTokenizer
test_rust_tokenizer = False
test_sentencepiece = True
def _a ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB )
mask_token = AddedToken("""<mask>""" , lstrip=True , rstrip=False )
tokenizer.mask_token = mask_token
tokenizer.add_special_tokens({"""mask_token""": mask_token} )
tokenizer.add_tokens(["""<ctc_blank>"""] )
tokenizer.save_pretrained(self.tmpdirname )
def _a ( self : Optional[int] , _lowerCAmelCase : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = """this is a test"""
__lowercase = """this is a test"""
return input_text, output_text
def _a ( self : List[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any]=False , _lowerCAmelCase : str=20 , _lowerCAmelCase : List[str]=5 ) -> str:
"""simple docstring"""
__lowercase , __lowercase = self.get_input_output_texts(_lowerCAmelCase )
__lowercase = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
__lowercase = tokenizer.decode(_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
return text, ids
def _a ( self : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase = """<pad>"""
__lowercase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) , _lowerCAmelCase )
def _a ( self : str ) -> Any:
"""simple docstring"""
vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-4] , """œ""" )
self.assertEqual(vocab_keys[-2] , """<mask>""" )
self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" )
self.assertEqual(len(_lowerCAmelCase ) , 81 )
def _a ( self : List[Any] ) -> str:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def _a ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__lowercase = tokenizer.vocab_size
__lowercase = len(_lowerCAmelCase )
self.assertNotEqual(_lowerCAmelCase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__lowercase = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
__lowercase = tokenizer.add_tokens(_lowerCAmelCase )
__lowercase = tokenizer.vocab_size
__lowercase = len(_lowerCAmelCase )
self.assertNotEqual(_lowerCAmelCase , 0 )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , len(_lowerCAmelCase ) )
self.assertEqual(_lowerCAmelCase , all_size + len(_lowerCAmelCase ) )
__lowercase = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=_lowerCAmelCase )
self.assertGreaterEqual(len(_lowerCAmelCase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__lowercase = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
__lowercase = tokenizer.add_special_tokens(_lowerCAmelCase )
__lowercase = tokenizer.vocab_size
__lowercase = len(_lowerCAmelCase )
self.assertNotEqual(_lowerCAmelCase , 0 )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , len(_lowerCAmelCase ) )
self.assertEqual(_lowerCAmelCase , all_size_a + len(_lowerCAmelCase ) )
__lowercase = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=_lowerCAmelCase )
self.assertGreaterEqual(len(_lowerCAmelCase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def _a ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
pass
def _a ( self : Any ) -> Any:
"""simple docstring"""
pass
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
tokenizer = self.get_tokenizer()
tokens = tokenizer.tokenize("""This is a test""" )
# fmt: off
self.assertListEqual(_lowerCAmelCase , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
__lowercase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_lowerCAmelCase , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
__lowercase = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
# fmt: off
self.assertListEqual(_lowerCAmelCase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
__lowercase = tokenizer.convert_ids_to_tokens(_lowerCAmelCase )
self.assertListEqual(
_lowerCAmelCase , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
@slow
def _a ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
__lowercase = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=_lowerCAmelCase , )
| 80 |
def counting_sort( collection ):
'''Stable counting sort for a collection of integers.'''
if collection == []:
return []
# get some information about the collection
coll_len = len(collection )
coll_max = max(collection )
coll_min = min(collection )
# create the counting array
counting_arr_length = coll_max + 1 - coll_min
counting_arr = [0] * counting_arr_length
# count how many times a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
# sum each position with its predecessors. Now, counting_arr[i] tells
# us how many elements <= i are in the collection
for i in range(1 , counting_arr_length ):
counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
# create the output collection
ordered = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort), from end to beginning, updating counting_arr
for i in reversed(range(0 , coll_len ) ):
ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def counting_sort_string( string ):
'''Sort the characters of a string using counting sort.'''
return "".join([chr(i ) for i in counting_sort([ord(c ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
user_input = input("""Enter numbers separated by a comma:\n""").strip()
unsorted = [int(item) for item in user_input.split(""",""")]
print(counting_sort(unsorted))
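# Worked example (illustrative, assuming the functions above): sorting [4, 1, 3, 1].
# The value range is 1..4, so the raw counts are [2, 0, 1, 1]; after the prefix-sum
# pass they become [2, 2, 3, 4], i.e. counting_arr[i] holds how many elements are
# <= i + coll_min, which is the 1-based final position of the last copy of each value.
assert counting_sort([4, 1, 3, 1]) == [1, 1, 3, 4]
assert counting_sort([]) == []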
| 80 | 1 |
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = '''src/transformers'''
# Matches is_xxx_available()
_re_backend = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_re_try = re.compile(R'''^\s*try:''')
# Catches a line with else:
_re_else = re.compile(R'''^\s*else:''')
def find_backend( line ):
"""Find one (or multiple) backend in a code line of the init."""
if _re_test_backend.search(line ) is None:
return None
backends = [b[0] for b in _re_backend.findall(line )]
backends.sort()
return "_and_".join(backends )
def parse_init( A__ ):
"""simple docstring"""
with open(A__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__lowercase = f.readlines()
__lowercase = 0
while line_index < len(A__ ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(A__ ):
return None
# First grab the objects without a specific backend in _import_structure
__lowercase = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
__lowercase = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(A__ ):
__lowercase = _re_one_line_import_struct.search(A__ ).groups()[0]
__lowercase = re.findall(R'''\[([^\]]+)\]''' , A__ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
__lowercase = _re_import_struct_key_value.search(A__ )
if single_line_import_search is not None:
__lowercase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(A__ ) > 0]
objects.extend(A__ )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
__lowercase = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
__lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
__lowercase = lines[line_index]
if _re_import_struct_add_one.search(A__ ) is not None:
objects.append(_re_import_struct_add_one.search(A__ ).groups()[0] )
elif _re_import_struct_add_many.search(A__ ) is not None:
__lowercase = _re_import_struct_add_many.search(A__ ).groups()[0].split(''', ''' )
__lowercase = [obj[1:-1] for obj in imports if len(A__ ) > 0]
objects.extend(A__ )
elif _re_between_brackets.search(A__ ) is not None:
__lowercase = _re_between_brackets.search(A__ ).groups()[0].split(''', ''' )
__lowercase = [obj[1:-1] for obj in imports if len(A__ ) > 0]
objects.extend(A__ )
elif _re_quote_object.search(A__ ) is not None:
objects.append(_re_quote_object.search(A__ ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
__lowercase = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
__lowercase = []
while (
line_index < len(A__ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
__lowercase = lines[line_index]
__lowercase = _re_import.search(A__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
__lowercase = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(A__ ):
# If the line is an if is_backend_available, we grab all objects associated.
__lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
__lowercase = lines[line_index]
__lowercase = _re_import.search(A__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
__lowercase = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def analyze_results( import_dict_objects , type_hint_objects ):
"""Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""
def find_duplicates(A__ ):
return [k for k, v in collections.Counter(A__ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
errors = []
for key in import_dict_objects.keys():
duplicate_imports = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"Duplicate _import_structure definitions for: {duplicate_imports}" )
duplicate_type_hints = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
name = '''base imports''' if key == '''none''' else F"{key} backend"
errors.append(F"Differences for {name}:" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F"  {a} in TYPE_HINT but not in _import_structure." )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F"  {a} in _import_structure but not in TYPE_HINT." )
return errors
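# Illustrative sanity check (assumed inputs, not part of the original script):
# a duplicated entry on the _import_structure side yields a readable error string.
assert any(
    "Duplicate _import_structure" in error
    for error in analyze_results({"none": ["A", "A"]} , {"none": ["A"]} )
)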
def check_all_inits( ):
"""Check that all the inits define the same objects in both halves."""
failures = []
for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
if "__init__.py" in files:
fname = os.path.join(root , '''__init__.py''' )
objects = parse_init(fname )
if objects is not None:
errors = analyze_results(*objects )
if len(errors ) > 0:
errors[0] = F"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
failures.append('''\n'''.join(errors ) )
if len(failures ) > 0:
raise ValueError('''\n\n'''.join(failures ) )
def get_transformers_submodules( ):
"""Return the list of Transformers submodules."""
submodules = []
for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(folder )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(path ) / folder).glob('''*.py''' ) ) ) == 0:
continue
short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
submodule = short_path.replace(os.path.sep , '''.''' )
submodules.append(submodule )
for fname in files:
if fname == "__init__.py":
continue
short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
submodule = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(submodule )
return submodules
IGNORE_SUBMODULES = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def check_submodules( ):
"""Check that every submodule is registered in the main init of Transformers."""
from transformers.utils import direct_transformers_import
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS )
import_structure_keys = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to find all additions and
# (potentially re-)add them.
with open(os.path.join(PATH_TO_TRANSFORMERS , '''__init__.py''' ) , '''r''' ) as f:
init_content = f.read()
import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , init_content ) ) )
module_not_registered = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(module_not_registered ) > 0:
list_of_modules = '''\n'''.join(F"- {module}" for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registered in the main init of Transformers:\n'''
F"{list_of_modules}\n"
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 624 |
'''simple docstring'''
def average_absolute_deviation( nums ):
"""Return the average absolute deviation of a list of numbers.

>>> average_absolute_deviation([2, 4, 6, 8])
2.0
"""
if not nums: # Makes sure that the list is not empty
raise ValueError('''List is empty''' )
average = sum(nums ) / len(nums ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 624 | 1 |
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest( TestCase ):
def A_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
self.tmpdirname = tempfile.mkdtemp()
self.num_block_records = 5
# Realm tok
vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'test',
'question',
'this',
'is',
'the',
'first',
'second',
'third',
'fourth',
'fifth',
'record',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
tokenizer_path = os.path.join(self.tmpdirname , 'realm_tokenizer' )
os.makedirs(tokenizer_path , exist_ok=True )
self.vocab_file = os.path.join(tokenizer_path , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
block_records_path = os.path.join(self.tmpdirname , 'realm_block_records' )
os.makedirs(block_records_path , exist_ok=True )
def A_ ( self : List[Any] ) -> RealmTokenizer:
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer' ) )
def A_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def A_ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
config = RealmConfig(num_block_records=self.num_block_records )
return config
def A_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
dataset = Dataset.from_dict(
{
'id': ['0', '1'],
'question': ['foo', 'bar'],
'answers': [['Foo', 'Bar'], ['Bar']],
} )
return dataset
def A_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
block_records = np.array(
[
b'This is the first record',
b'This is the second record',
b'This is the third record',
b'This is the fourth record',
b'This is the fifth record',
b'This is a longer longer longer record',
] , dtype=__UpperCAmelCase , )
return block_records
def A_ ( self : List[str] ) -> Tuple:
'''simple docstring'''
retriever = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def A_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
__snake_case : Any = self.get_config()
__snake_case : Optional[int] = self.get_dummy_retriever()
__snake_case : Dict = retriever.tokenizer
__snake_case : int = np.array([0, 3] , dtype='long' )
__snake_case : Dict = tokenizer(['Test question'] ).input_ids
__snake_case : Any = tokenizer(
['the fourth'] , add_special_tokens=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , ).input_ids
__snake_case : Optional[int] = config.reader_seq_len
__snake_case , __snake_case , __snake_case , __snake_case : List[Any] = retriever(
__UpperCAmelCase , __UpperCAmelCase , answer_ids=__UpperCAmelCase , max_length=__UpperCAmelCase , return_tensors='np' )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )
def A_ ( self : Tuple ) -> Tuple:
'''simple docstring'''
__snake_case : Tuple = self.get_config()
__snake_case : int = self.get_dummy_retriever()
__snake_case : List[str] = retriever.tokenizer
__snake_case : Any = np.array([0, 3, 5] , dtype='long' )
__snake_case : Optional[int] = tokenizer(['Test question'] ).input_ids
__snake_case : List[str] = tokenizer(
['the fourth', 'longer longer'] , add_special_tokens=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , ).input_ids
__snake_case : Dict = config.reader_seq_len
__snake_case , __snake_case , __snake_case , __snake_case : Dict = retriever(
__UpperCAmelCase , __UpperCAmelCase , answer_ids=__UpperCAmelCase , max_length=__UpperCAmelCase , return_tensors='np' )
self.assertEqual([False, True, True] , __UpperCAmelCase )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __UpperCAmelCase )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __UpperCAmelCase )
def A_ ( self : Tuple ) -> Dict:
'''simple docstring'''
__snake_case : Union[str, Any] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
# Test local path
__snake_case : Any = retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
self.assertEqual(retriever.block_records[0] , b'This is the first record' )
# Test mocked remote path
with patch('transformers.models.realm.retrieval_realm.hf_hub_download' ) as mock_hf_hub_download:
__snake_case : Dict = os.path.join(
os.path.join(self.tmpdirname , 'realm_block_records' ) , _REALM_BLOCK_RECORDS_FILENAME )
__snake_case : Dict = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa' )
self.assertEqual(retriever.block_records[0] , b'This is the first record' )
| 286 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests often fail with an OOM error on GPU. The "platform" allocator makes
# JAX allocate exactly what is needed on demand and deallocate memory that is no
# longer needed, at the cost of slower execution, as explained here:
# https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
os.environ['''XLA_PYTHON_CLIENT_ALLOCATOR'''] = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester :
config_cls = PegasusConfig
config_updates = {}
hidden_act = """gelu"""
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )-> Any:
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
def UpperCAmelCase ( self )-> Optional[Any]:
'''simple docstring'''
input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
input_ids = np.concatenate([input_ids, eos_tensor] , axis=1 )
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
config = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
return config, inputs_dict
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = 20
lowerCAmelCase__ = model_class_name(__UpperCAmelCase )
lowerCAmelCase__ = model.encode(inputs_dict["input_ids"] )
lowerCAmelCase__ , lowerCAmelCase__ = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
lowerCAmelCase__ = model.init_cache(decoder_input_ids.shape[0] , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
lowerCAmelCase__ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase__ = model.decode(
decoder_input_ids[:, :-1] , __UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , decoder_position_ids=__UpperCAmelCase , )
lowerCAmelCase__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
lowerCAmelCase__ = model.decode(
decoder_input_ids[:, -1:] , __UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__UpperCAmelCase , )
lowerCAmelCase__ = model.decode(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}" )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = 20
lowerCAmelCase__ = model_class_name(__UpperCAmelCase )
lowerCAmelCase__ = model.encode(inputs_dict["input_ids"] )
lowerCAmelCase__ , lowerCAmelCase__ = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
lowerCAmelCase__ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowerCAmelCase__ = model.init_cache(decoder_input_ids.shape[0] , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase__ = model.decode(
decoder_input_ids[:, :-1] , __UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , decoder_position_ids=__UpperCAmelCase , )
lowerCAmelCase__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
lowerCAmelCase__ = model.decode(
decoder_input_ids[:, -1:] , __UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__UpperCAmelCase , decoder_position_ids=__UpperCAmelCase , )
lowerCAmelCase__ = model.decode(__UpperCAmelCase , __UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase )
lowerCAmelCase__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}" )
def prepare_pegasus_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , ):
"""simple docstring"""
if attention_mask is None:
attention_mask = np.not_equal(input_ids , config.pad_token_id ).astype(np.int8 )
if decoder_attention_mask is None:
decoder_attention_mask = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8 ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.int8 ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
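# Illustrative check (assumed values) of the mask construction above: padding ids
# become 0 in the encoder attention mask, while the decoder mask always keeps the
# first position.
example_ids = np.array([[5, 6, 0]] )  # assume 0 is the pad token id
example_mask = np.not_equal(example_ids , 0 ).astype(np.int8 )
assert example_mask.tolist() == [[1, 1, 0]]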
@require_flax
class FlaxPegasusModelTest( FlaxModelTesterMixin, unittest.TestCase ):
all_model_classes =(
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
all_generative_model_classes =(FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
a_ =True
a_ =False
a_ =False
a_ =False
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
self.model_tester = FlaxPegasusModelTester(self )
self.config_tester = ConfigTester(self , config_class=PegasusConfig )
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = model_class(__UpperCAmelCase )
@jax.jit
def encode_jitted(__UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ):
return model.encode(input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase )
with self.subTest("JIT Enabled" ):
lowerCAmelCase__ = encode_jitted(**__UpperCAmelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
lowerCAmelCase__ = encode_jitted(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) )
for jitted_output, output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase__ = model_class(__UpperCAmelCase )
lowerCAmelCase__ = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
lowerCAmelCase__ = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
return model.decode(
decoder_input_ids=__UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , encoder_outputs=__UpperCAmelCase , )
with self.subTest("JIT Enabled" ):
lowerCAmelCase__ = decode_jitted(**__UpperCAmelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
lowerCAmelCase__ = decode_jitted(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) )
for jitted_output, output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCAmelCase__ = model_class_name.from_pretrained("google/pegasus-large" , from_pt=__UpperCAmelCase )
lowerCAmelCase__ = np.ones((1, 1) )
lowerCAmelCase__ = model(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@slow
def UpperCAmelCase ( self )-> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
lowerCAmelCase__ = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
lowerCAmelCase__ = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
tgt_text = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
lowerCAmelCase__ = tokenizer(__UpperCAmelCase , return_tensors="np" , truncation=__UpperCAmelCase , max_length=512 , padding=__UpperCAmelCase )
lowerCAmelCase__ = model.generate(**__UpperCAmelCase , num_beams=2 ).sequences
lowerCAmelCase__ = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
assert tgt_text == decoded
| 339 | 0 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotAudioClassificationPipeline( Pipeline ):
"""Zero-shot audio classification pipeline using a text-audio matching model."""
def __init__( self : List[Any] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> str:
super().__init__(**SCREAMING_SNAKE_CASE__ )
if self.framework != "pt":
raise ValueError(f'The {self.__class__} is only available in PyTorch.' )
# No specific FOR_XXX available yet
def __call__( self : int , SCREAMING_SNAKE_CASE__ : Union[np.ndarray, bytes, str] , **SCREAMING_SNAKE_CASE__ : int ) -> int:
return super().__call__(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def a ( self : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[Any]:
lowerCAmelCase__ = {}
if "candidate_labels" in kwargs:
lowerCAmelCase__ = kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
lowerCAmelCase__ = kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[str]="This is a sound of {}." ) -> int:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if audio.startswith("http://" ) or audio.startswith("https://" ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
lowerCAmelCase__ = requests.get(SCREAMING_SNAKE_CASE__ ).content
else:
with open(SCREAMING_SNAKE_CASE__ , "rb" ) as f:
lowerCAmelCase__ = f.read()
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase__ = ffmpeg_read(SCREAMING_SNAKE_CASE__ , self.feature_extractor.sampling_rate )
if not isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray ):
raise ValueError("We expect a numpy ndarray as input" )
if len(audio.shape ) != 1:
raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline" )
lowerCAmelCase__ = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="pt" )
lowerCAmelCase__ = candidate_labels
lowerCAmelCase__ = [hypothesis_template.format(SCREAMING_SNAKE_CASE__ ) for x in candidate_labels]
lowerCAmelCase__ = self.tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors=self.framework , padding=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = [text_inputs]
return inputs
def a ( self : List[str] , SCREAMING_SNAKE_CASE__ : str ) -> str:
lowerCAmelCase__ = model_inputs.pop("candidate_labels" )
lowerCAmelCase__ = model_inputs.pop("text_inputs" )
if isinstance(text_inputs[0] , SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase__ = text_inputs[0]
else:
# Batching case.
lowerCAmelCase__ = text_inputs[0][0]
lowerCAmelCase__ = self.model(**SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_audio,
}
return model_outputs
def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]:
lowerCAmelCase__ = model_outputs.pop("candidate_labels" )
lowerCAmelCase__ = model_outputs["logits"][0]
if self.framework == "pt":
lowerCAmelCase__ = logits.softmax(dim=0 )
lowerCAmelCase__ = probs.tolist()
else:
raise ValueError("`tf` framework not supported." )
lowerCAmelCase__ = [
{"score": score, "label": candidate_label}
for score, candidate_label in sorted(zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , key=lambda SCREAMING_SNAKE_CASE__ : -x[0] )
]
return result
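# A usage sketch (hedged: the checkpoint name and audio file are assumptions, not
# taken from this module). The pipeline scores an audio clip against free-form
# candidate labels and returns them sorted by score, as implemented in postprocess.
from transformers import pipeline

classifier = pipeline(
    task="zero-shot-audio-classification",
    model="laion/clap-htsat-unfused",  # assumed CLAP checkpoint
)
predictions = classifier(
    "sample.wav",  # hypothetical local audio file
    candidate_labels=["dog barking", "vacuum cleaner"],
)
print(predictions)  # e.g. [{"score": 0.97, "label": "dog barking"}, ...]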
| 125 |
class __lowerCamelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int ) -> List[str]:
lowerCAmelCase__ = n
lowerCAmelCase__ = [None] * self.n
lowerCAmelCase__ = 0 # index of the first element
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
def __len__( self : str ) -> int:
return self.size
def a ( self : Any ) -> bool:
return self.size == 0
def a ( self : Dict ) -> List[str]:
return False if self.is_empty() else self.array[self.front]
def a ( self : Dict , SCREAMING_SNAKE_CASE__ : Dict ) -> Dict:
if self.size >= self.n:
raise Exception("QUEUE IS FULL" )
lowerCAmelCase__ = data
lowerCAmelCase__ = (self.rear + 1) % self.n
self.size += 1
return self
def a ( self : int ) -> Tuple:
if self.size == 0:
raise Exception("UNDERFLOW" )
lowerCAmelCase__ = self.array[self.front]
lowerCAmelCase__ = None
lowerCAmelCase__ = (self.front + 1) % self.n
self.size -= 1
return temp
| 125 | 1 |
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/tapas-base-finetuned-sqa""": (
"""https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wtq""": (
"""https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wikisql-supervised""": (
"""https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-tabfact""": (
"""https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"""
),
}
class TapasConfig( PretrainedConfig ):
model_type = "tapas"
def __init__( self , a=3_0_5_2_2 , a=7_6_8 , a=1_2 , a=1_2 , a=3_0_7_2 , a="gelu" , a=0.1 , a=0.1 , a=1_0_2_4 , a=[3, 2_5_6, 2_5_6, 2, 2_5_6, 2_5_6, 1_0] , a=0.02 , a=1e-12 , a=0 , a=10.0 , a=0 , a=1.0 , a=None , a=1.0 , a=False , a=None , a=1.0 , a=1.0 , a=False , a=False , a="ratio" , a=None , a=None , a=6_4 , a=3_2 , a=False , a=True , a=False , a=False , a=True , a=False , a=None , a=None , **a , ):
super().__init__(pad_token_id=a , **a )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
snake_case__ : List[str] =vocab_size
snake_case__ : Optional[int] =hidden_size
snake_case__ : int =num_hidden_layers
snake_case__ : Dict =num_attention_heads
snake_case__ : int =hidden_act
snake_case__ : List[Any] =intermediate_size
snake_case__ : Optional[int] =hidden_dropout_prob
snake_case__ : List[str] =attention_probs_dropout_prob
snake_case__ : List[str] =max_position_embeddings
snake_case__ : Optional[Any] =type_vocab_sizes
snake_case__ : List[Any] =initializer_range
snake_case__ : Any =layer_norm_eps
# Fine-tuning task hyperparameters
snake_case__ : Dict =positive_label_weight
snake_case__ : str =num_aggregation_labels
snake_case__ : Any =aggregation_loss_weight
snake_case__ : Any =use_answer_as_supervision
snake_case__ : int =answer_loss_importance
snake_case__ : str =use_normalized_answer_loss
snake_case__ : int =huber_loss_delta
snake_case__ : int =temperature
snake_case__ : List[str] =aggregation_temperature
snake_case__ : Dict =use_gumbel_for_cells
snake_case__ : List[Any] =use_gumbel_for_aggregation
snake_case__ : List[Any] =average_approximation_function
snake_case__ : Optional[int] =cell_selection_preference
snake_case__ : List[Any] =answer_loss_cutoff
snake_case__ : Tuple =max_num_rows
snake_case__ : Any =max_num_columns
snake_case__ : List[Any] =average_logits_per_cell
snake_case__ : str =select_one_column
snake_case__ : int =allow_empty_column_selection
snake_case__ : Dict =init_cell_selection_weights_to_zero
snake_case__ : Optional[int] =reset_position_index_per_cell
snake_case__ : Optional[int] =disable_per_token_loss
# Aggregation hyperparameters
snake_case__ : List[Any] =aggregation_labels
snake_case__ : Optional[Any] =no_aggregation_label_index
if isinstance(self.aggregation_labels , dict ):
self.aggregation_labels = {int(k ): v for k, v in aggregation_labels.items()}
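# A minimal sketch (illustrative) of instantiating the config above with custom
# fine-tuning hyperparameters; the argument names come from __init__'s signature.
example_config = TapasConfig(num_aggregation_labels=4 , use_answer_as_supervision=True )
# In the full library this config would then be passed to a TAPAS model class.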
| 385 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def __A(args ) -> Dict:
"""simple docstring"""
parameter_file = os.path.join(args.tf_model_dir , """parameters.json""" )
params = json.loads(open(parameter_file ).read() )
if not params:
raise ValueError(
F'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' )
if not args.output.endswith(""".pt""" ):
_UpperCamelCase = args.output + """.pt"""
_UpperCamelCase = OrderedDict()
with tf.device("""/CPU:0""" ):
reader = tf.train.load_checkpoint(args.tf_model_dir )
shapes = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
vnp = reader.get_tensor(key_name ).astype(np.float32 )
if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ):
continue
if key_name.startswith("""pasts/""" ):
if key_name.startswith("""pasts/mlp""" ):
player = int(key_name[9] )
elif key_name.startswith("""pasts/out""" ):
player = 8
_UpperCamelCase = """model.sqout.%d.weight""" % (player * 2) # goes into nn.Sequential with Tanh, so 2 at a time
_UpperCamelCase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.startswith("""model/moe""" ):
_UpperCamelCase = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/switch_gating/kernel""" ):
_UpperCamelCase = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player
_UpperCamelCase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.endswith("""/softmlp/kernel""" ):
_UpperCamelCase = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player
_UpperCamelCase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ):
_UpperCamelCase = key_name[-9:-7]
for i in range(1_6 ):
_UpperCamelCase = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer)
_UpperCamelCase = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.startswith("""model/mlp""" ):
_UpperCamelCase = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/p1/kernel""" ):
_UpperCamelCase = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player
_UpperCamelCase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.endswith("""/p1/bias""" ):
_UpperCamelCase = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player
_UpperCamelCase = vnp.copy() # same because it is one dimensional
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.endswith("""/p2/kernel""" ):
_UpperCamelCase = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player
_UpperCamelCase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.endswith("""/p2/bias""" ):
_UpperCamelCase = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player
_UpperCamelCase = vnp.copy() # same because it is one dimensional
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.startswith("""model/ln""" ):
_UpperCamelCase = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
_UpperCamelCase = """model.blocks.%d.feed_forward.norm.bias""" % player
_UpperCamelCase = vnp.copy() # same because it is one dimensional
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.endswith("""/g""" ):
_UpperCamelCase = """model.blocks.%d.feed_forward.norm.weight""" % player
_UpperCamelCase = vnp.copy() # same because it is one dimensional
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.startswith("""model/att""" ):
_UpperCamelCase = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/qkv/kernel""" ):
_UpperCamelCase = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_UpperCamelCase = state[:, 0, :, :]
_UpperCamelCase = state[:, 1, :, :]
_UpperCamelCase = state[:, 2, :, :]
_UpperCamelCase = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player
_UpperCamelCase = torch.tensor(lowerCAmelCase )
_UpperCamelCase = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player
_UpperCamelCase = torch.tensor(lowerCAmelCase )
_UpperCamelCase = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.endswith("""/o/kernel""" ):
_UpperCamelCase = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player
_UpperCamelCase = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.startswith("""model/an""" ):
_UpperCamelCase = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
_UpperCamelCase = """model.blocks.%d.self_attn.norm.bias""" % player
_UpperCamelCase = vnp.copy() # same because it is one dimensional
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.endswith("""/g""" ):
_UpperCamelCase = """model.blocks.%d.self_attn.norm.weight""" % player
_UpperCamelCase = vnp.copy() # same because it is one dimensional
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif (
key_name.startswith("""model/wte""" )
or key_name.startswith("""model/wpe""" )
or key_name.startswith("""model/ete""" )
):
_UpperCamelCase = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[
key_name[-3:]
]
_UpperCamelCase = """model.%s.weight""" % nlayer
_UpperCamelCase = vnp.copy() # same in embedded
_UpperCamelCase = torch.tensor(lowerCAmelCase )
if key_name.startswith("""model/wte""" ):
_UpperCamelCase = """lm_head.weight"""
_UpperCamelCase = vnp.copy() # same in embedded
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name.startswith("""model/wob""" ):
_UpperCamelCase = """final_logits_bias"""
_UpperCamelCase = vnp.copy() # same in embedded
_UpperCamelCase = state.reshape((1, -1) )
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name == "model/dense/kernel":
_UpperCamelCase = """model.last_project.weight"""
_UpperCamelCase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_UpperCamelCase = torch.tensor(lowerCAmelCase )
elif key_name == "model/dense_1/bias":
_UpperCamelCase = """model.last_project.bias"""
_UpperCamelCase = vnp.copy() # same because it is one dimensional
_UpperCamelCase = torch.tensor(lowerCAmelCase )
torch.save(lowerCAmelCase , args.output )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
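# The transpose([1, 0]).copy() calls above all implement the same conversion:
# the TensorFlow checkpoint stores dense kernels as (in_features, out_features),
# while torch.nn.Linear weights are (out_features, in_features). A minimal
# standalone sketch of that single step, with made-up shapes:
import numpy as np
import torch

tf_kernel = np.random.rand(512, 1024).astype(np.float32)      # (in, out) layout
pt_weight = torch.tensor(tf_kernel.transpose([1, 0]).copy())  # (out, in) layout
assert tuple(pt_weight.shape) == (1024, 512)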
| 612 | 0 |
"""simple docstring"""
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n (divisors excluding n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10_000) -> int:
    """Sum all amicable numbers below `limit` (Project Euler problem 21)."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
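# Quick sanity check with the classic amicable pair (220, 284): the proper
# divisors of 220 sum to 284 and vice versa, so both numbers count toward the total.
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220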
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 720 |
"""simple docstring"""
g = 9.80665  # standard gravity, m/s^2


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Buoyant force = fluid density * gravity * displaced volume."""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume
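# Worked example: 0.5 m^3 of fresh water (rho = 1000 kg/m^3) under standard
# gravity displaces a buoyant force of 1000 * 9.80665 * 0.5 = 4903.325 N.
assert abs(archimedes_principle(fluid_density=1000.0, volume=0.5) - 4903.325) < 1e-6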
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 222 | 0 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    """Möbius function mu(n): 1 or -1 for square-free n (by parity of its prime factors), else 0."""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
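# Spot checks matching the textbook values (assuming, as in the imports above,
# that `prime_factors` returns factors with multiplicity and `is_square_free`
# rejects repeated entries): mu(1)=1, mu(6)=1, mu(30)=-1, mu(12)=0.
assert [mobius(n) for n in (1, 6, 30, 12)] == [1, 1, -1, 0]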
if __name__ == "__main__":
import doctest
doctest.testmod()
| 383 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split(identifier):
    """Split a camel-cased identifier into its words."""
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
    return [m.group(0) for m in matches]
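# Quick illustration of the splitter; the backend scan below peels trailing
# words off model-class prefixes one at a time using this.
assert camel_case_split('TFBertForSequenceClassification') == ['TF', 'Bert', 'For', 'Sequence', 'Classification']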
def get_frameworks_table():
    """Build a dataframe flagging, per model type, its PT/TF/Flax support and default processor."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace('Config', ''): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's lookup through all transformers objects (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''.join(camel_case_split(attr_name)[:-1])
    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()
    data = {'model_type': all_models}
    data['pytorch'] = [pt_models[t] for t in all_models]
    data['tensorflow'] = [tf_models[t] for t in all_models]
    data['flax'] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to find the right processor for each model type.
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = 'AutoProcessor'
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = 'AutoTokenizer'
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = 'AutoFeatureExtractor'
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = 'AutoTokenizer'
    data['processor'] = [processors[t] for t in all_models]
    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    """Add to `table` the pipeline tag and auto class for every model class found in the auto modules."""
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f'TF_{model_mapping}', f'FLAX_{model_mapping}']
        auto_classes = [auto_class, f'TF_{auto_class}', f'Flax_{auto_class}']
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})
    return table
def update_metadata(token, commit_sha):
    """Regenerate the frameworks and pipeline-tags tables and push them to `huggingface/transformers-metadata`."""
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)
    resolved_tags_file = hf_hub_download(
        'huggingface/transformers-metadata', 'pipeline_tags.json', repo_type='dataset', token=token)
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            'model_class': model_classes,
            'pipeline_tag': [table[m][0] for m in model_classes],
            'auto_class': [table[m][1] for m in model_classes],
        })
    tags_dataset = Dataset.from_pandas(tags_table)
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, 'frameworks.json'))
        tags_dataset.to_json(os.path.join(tmp_dir, 'pipeline_tags.json'))
        if commit_sha is not None:
            commit_message = (
                f'Update with commit {commit_sha}\n\nSee: '
                f'https://github.com/huggingface/transformers/commit/{commit_sha}'
            )
        else:
            commit_message = 'Update'
        upload_folder(
            repo_id='huggingface/transformers-metadata', folder_path=tmp_dir, repo_type='dataset', token=token, commit_message=commit_message, )
def check_pipeline_tags():
    """Check that every supported pipeline task appears in the PIPELINE_TAGS_AND_AUTO_MODELS constant."""
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]['pt']
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)
    if len(missing) > 0:
        msg = ', '.join(missing)
        raise ValueError(
            'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
            f'`utils/update_metadata.py`: {msg}. Please add them!')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 383 | 1 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe_1_model_id = 'CompVis/stable-diffusion-v1-1'
pipe_2_model_id = 'CompVis/stable-diffusion-v1-2'
pipe_3_model_id = 'CompVis/stable-diffusion-v1-3'
pipe_4_model_id = 'CompVis/stable-diffusion-v1-4'
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, requires_safety_checker=True):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe_1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe_2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe_3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker, )
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
@property
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith('_')}
    def enable_attention_slicing(self, slice_size='auto'):
        '''Enable sliced attention computation to save memory.'''
        if slice_size == 'auto':
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        '''Disable sliced attention computation.'''
        self.enable_attention_slicing(None)
@torch.no_grad()
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str = 5_1_2 , SCREAMING_SNAKE_CASE__ : Optional[Any] = 5_1_2 , SCREAMING_SNAKE_CASE__ : List[str] = 5_0 , SCREAMING_SNAKE_CASE__ : str = 7.5 , SCREAMING_SNAKE_CASE__ : List[str] = None , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : Dict = 0.0 , SCREAMING_SNAKE_CASE__ : List[str] = None , SCREAMING_SNAKE_CASE__ : Any = None , SCREAMING_SNAKE_CASE__ : Any = "pil" , SCREAMING_SNAKE_CASE__ : Any = True , SCREAMING_SNAKE_CASE__ : Dict = None , SCREAMING_SNAKE_CASE__ : str = 1 , **SCREAMING_SNAKE_CASE__ : Any , ):
'''simple docstring'''
return self.pipea(
prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , )
@torch.no_grad()
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict = 5_1_2 , SCREAMING_SNAKE_CASE__ : int = 5_1_2 , SCREAMING_SNAKE_CASE__ : Optional[int] = 5_0 , SCREAMING_SNAKE_CASE__ : Optional[int] = 7.5 , SCREAMING_SNAKE_CASE__ : Dict = None , SCREAMING_SNAKE_CASE__ : Tuple = 1 , SCREAMING_SNAKE_CASE__ : Any = 0.0 , SCREAMING_SNAKE_CASE__ : List[str] = None , SCREAMING_SNAKE_CASE__ : Tuple = None , SCREAMING_SNAKE_CASE__ : Optional[Any] = "pil" , SCREAMING_SNAKE_CASE__ : List[Any] = True , SCREAMING_SNAKE_CASE__ : List[Any] = None , SCREAMING_SNAKE_CASE__ : Optional[Any] = 1 , **SCREAMING_SNAKE_CASE__ : Any , ):
'''simple docstring'''
return self.pipea(
prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , )
@torch.no_grad()
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] = 5_1_2 , SCREAMING_SNAKE_CASE__ : str = 5_1_2 , SCREAMING_SNAKE_CASE__ : Tuple = 5_0 , SCREAMING_SNAKE_CASE__ : str = 7.5 , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Any = 1 , SCREAMING_SNAKE_CASE__ : int = 0.0 , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Union[str, Any] = None , SCREAMING_SNAKE_CASE__ : Any = "pil" , SCREAMING_SNAKE_CASE__ : int = True , SCREAMING_SNAKE_CASE__ : List[str] = None , SCREAMING_SNAKE_CASE__ : List[str] = 1 , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ):
'''simple docstring'''
return self.pipea(
prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , )
@torch.no_grad()
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] = 5_1_2 , SCREAMING_SNAKE_CASE__ : Optional[int] = 5_1_2 , SCREAMING_SNAKE_CASE__ : Optional[int] = 5_0 , SCREAMING_SNAKE_CASE__ : Dict = 7.5 , SCREAMING_SNAKE_CASE__ : Dict = None , SCREAMING_SNAKE_CASE__ : Any = 1 , SCREAMING_SNAKE_CASE__ : List[str] = 0.0 , SCREAMING_SNAKE_CASE__ : int = None , SCREAMING_SNAKE_CASE__ : Any = None , SCREAMING_SNAKE_CASE__ : Tuple = "pil" , SCREAMING_SNAKE_CASE__ : List[Any] = True , SCREAMING_SNAKE_CASE__ : Dict = None , SCREAMING_SNAKE_CASE__ : Optional[Any] = 1 , **SCREAMING_SNAKE_CASE__ : Optional[int] , ):
'''simple docstring'''
return self.pipea(
prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , )
@torch.no_grad()
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] = 5_1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any] = 5_1_2 , SCREAMING_SNAKE_CASE__ : Optional[int] = 5_0 , SCREAMING_SNAKE_CASE__ : List[Any] = 7.5 , SCREAMING_SNAKE_CASE__ : Optional[Any] = None , SCREAMING_SNAKE_CASE__ : List[str] = 1 , SCREAMING_SNAKE_CASE__ : Any = 0.0 , SCREAMING_SNAKE_CASE__ : Optional[Any] = None , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : List[str] = "pil" , SCREAMING_SNAKE_CASE__ : Dict = True , SCREAMING_SNAKE_CASE__ : Any = None , SCREAMING_SNAKE_CASE__ : Optional[Any] = 1 , **SCREAMING_SNAKE_CASE__ : str , ):
'''simple docstring'''
__a : Optional[Any] = "cuda" if torch.cuda.is_available() else "cpu"
self.to(a_ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )
# Get first result from Stable Diffusion Checkpoint v1.1
__a : Optional[Any] = self.textaimg_sda_a(
prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , )
# Get first result from Stable Diffusion Checkpoint v1.2
__a : str = self.textaimg_sda_a(
prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , )
# Get first result from Stable Diffusion Checkpoint v1.3
__a : Tuple = self.textaimg_sda_a(
prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , )
# Get first result from Stable Diffusion Checkpoint v1.4
__a : Any = self.textaimg_sda_a(
prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
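# Hedged usage sketch for the comparison pipeline above. The `__call__`
# arguments and output attribute below follow the obfuscated signature, so
# they are assumptions rather than a verified API; running it also requires
# downloading all four CompVis v1.x checkpoints and a CUDA device:
#
#   pipe = StableDiffusionComparisonPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
#   output = pipe("a red bicycle leaning against a brick wall", num_inference_steps=50)
#   v1_1_img, v1_2_img, v1_3_img, v1_4_img = output.images  # one image per checkpoint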
| 707 |
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """Return the numerator of the largest fraction below numerator/denominator with denominator <= limit."""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
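# With a tiny limit the mechanics are easy to check by hand: for denominators
# up to 8 the largest fraction below 3/7 is 2/5, so the numerator 2 is returned.
# (The full run with limit=1_000_000 yields 428570, the Project Euler 71 answer.)
assert solution(numerator=3, denominator=7, limit=8) == 2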
if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_000_000))
| 577 | 0 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 660 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Miller-Rabin probabilistic primality test with `prec` random rounds."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
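# Probabilistic spot checks (assuming `bin_exp_mod(a, d, n)` computes a**d mod n,
# per the import above): the Carmichael number 561 is rejected, while the
# 10_000th prime passes every round.
assert not is_prime_big(561)
assert is_prime_big(104_729)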
if __name__ == "__main__":
    n = abs(int(input('Enter bound : ').strip()))
print('Here\'s the list of primes:')
print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 693 | 0 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = 'https://pypi.org/pypi/diffusers/json'
    releases = json.loads(request.urlopen(url).read())['releases'].keys()
    return sorted(releases, key=lambda x: version.Version(x))
def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / '__init__.py'
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name: Union[str, os.PathLike]):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / '__init__.py'
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    with open(module_file, 'r', encoding='utf-8') as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r'^\s*import\s+\.(\S+)\s*$', content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r'^\s*from\s+\.(\S+)\s+import', content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))
        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f'{f}.py' for f in new_import_files]
        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)
    return all_relative_imports
def check_imports(filename):
    with open(filename, 'r', encoding='utf-8') as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r'^\s*import\s+(\S+)\s*$', content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r'^\s*from\s+(\S+)\s+import', content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split('.')[0] for imp in imports if not imp.startswith('.')]
    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)
    if len(missing_packages) > 0:
        raise ImportError(
            'This modeling file requires the following packages that were not found in your environment: '
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`")
    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, '.')
    module = importlib.import_module(module_path)
    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split('.')[0] != 'diffusers'
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
                    f' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
                    f' {loaded_module}.')
            pipeline_class = cls
    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = 'local'
    elif pretrained_model_name_or_path.count('/') == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = 'v' + '.'.join(__version__.split('.')[:3])
        # retrieve the github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else 'main'
            logger.info(f'Defaulting to latest_version: {revision}.')
        elif revision in available_versions:
            revision = f'v{revision}'
        elif revision == 'main':
            pass  # "main" is used as-is
        else:
            raise ValueError(
                f'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
                f" {', '.join(available_versions + ['main'])}.")
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=False, )
            submodule = 'git'
            module_file = pretrained_model_name_or_path + '.py'
        except EnvironmentError:
            logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.')
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, )
            submodule = os.path.join('local', '--'.join(pretrained_model_name_or_path.split('/')))
        except EnvironmentError:
            logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.')
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == 'local' or submodule == 'git':
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f'{module_needed}.py'
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path, f'{module_needed}.py', cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return get_class_in_module(class_name, final_module.replace('.py', ''))
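# Standalone illustration of the relative-import scan in get_relative_imports,
# applied to an in-memory snippet instead of a file:
_snippet = 'import .pipeline_utils\nfrom .schedulers import DDIMScheduler\n'
_rels = re.findall(r'^\s*import\s+\.(\S+)\s*$', _snippet, flags=re.MULTILINE)
_rels += re.findall(r'^\s*from\s+\.(\S+)\s+import', _snippet, flags=re.MULTILINE)
assert sorted(set(_rels)) == ['pipeline_utils', 'schedulers']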
| 467 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs are passed through to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
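# Hedged CLI sketch via python-fire (the script and file names are
# placeholders, not taken from the repo):
#   python rouge_cli.py predictions.txt references.txt --save_path metrics.json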
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 467 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class TapasConfig(PretrainedConfig):
    """Configuration for TAPAS: a BERT-style backbone plus table fine-tuning hyperparameters."""

    model_type = "tapas"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, positive_label_weight=10.0, num_aggregation_labels=0, aggregation_loss_weight=1.0, use_answer_as_supervision=None, answer_loss_importance=1.0, use_normalized_answer_loss=False, huber_loss_delta=None, temperature=1.0, aggregation_temperature=1.0, use_gumbel_for_cells=False, use_gumbel_for_aggregation=False, average_approximation_function="ratio", cell_selection_preference=None, answer_loss_cutoff=None, max_num_rows=64, max_num_columns=32, average_logits_per_cell=False, select_one_column=True, allow_empty_column_selection=False, init_cell_selection_weights_to_zero=False, reset_position_index_per_cell=True, disable_per_token_loss=False, aggregation_labels=None, no_aggregation_label_index=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
| 260 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'add_prefix_space': True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
            '<|endoftext|>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def _UpperCAmelCase ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer(add_prefix_space=__a )
A__ = 'lower newer'
# Testing tokenization
A__ = tokenizer.tokenize(__a , add_prefix_space=__a )
A__ = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids without special tokens
A__ = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
A__ = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids with special tokens
A__ = self.get_rust_tokenizer(add_prefix_space=__a )
A__ = tokenizer.encode(__a , add_prefix_space=__a )
A__ = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
# Testing the unknown token
A__ = tokens + [rust_tokenizer.unk_token]
A__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a )
def _UpperCAmelCase ( self , *__a , **__a ):
"""simple docstring"""
pass
def _UpperCAmelCase ( self , __a=15 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A__ = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
A__ = 'This is a simple input'
A__ = ['This is a simple input 1', 'This is a simple input 2']
A__ = ('This is a simple input', 'This is a pair')
A__ = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
A__ = 'This is a simple input'
A__ = ['This is a simple input looooooooong', 'This is a simple input']
A__ = ('This is a simple input', 'This is a pair')
A__ = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
A__ = tokenizer.pad_token_id
A__ = tokenizer(__a , padding='max_length' , max_length=30 , return_tensors='np' )
A__ = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' )
A__ = tokenizer(*__a , padding='max_length' , max_length=60 , return_tensors='np' )
A__ = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = '$$$'
A__ = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a )
A__ = 'This is a simple input'
A__ = ['This is a simple input 1', 'This is a simple input 2']
A__ = tokenizer.bos_token_id
A__ = tokenizer(__a )
A__ = tokenizer(__a )
self.assertEqual(out_s.input_ids[0] , __a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
A__ = tokenizer.decode(out_s.input_ids )
A__ = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
A__ = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
A__ = '\nif len_a > len_b: result = a\nelse: result = b'
A__ = tokenizer.encode(__a )
A__ = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n']
A__ = tokenizer.decode(__a , truncate_before_pattern=__a )
self.assertEqual(__a , __a )
def _UpperCAmelCase ( self ):
"""simple docstring"""
pass
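# The '\u0120' prefix used throughout the toy vocab above is byte-level BPE's
# printable marker for a leading space (it renders as 'Ġ'); a standalone check:
assert '\u0120low' == 'Ġlow'
assert 'Ġlow'.replace('\u0120', ' ') == ' low'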
| 260 | 1 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class __snake_case :
'''simple docstring'''
def _a ( self ):
torch.manual_seed(0 )
a__ = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
a__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
a__ = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
a__ = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_001 , beta_end=0.02 , thresholding=a_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
a__ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _a ( self ):
torch.manual_seed(0 )
a__ = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
a__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
a__ = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.414 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
a__ = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_001 , beta_end=0.02 , thresholding=a_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
a__ = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_001 , beta_end=0.02 , )
torch.manual_seed(0 )
a__ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _a ( self ):
a__ = self.get_dummy_components()
a__ = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
a__ = self.get_dummy_inputs(a_ )
a__ = inputs["""prompt"""]
a__ = inputs["""generator"""]
a__ = inputs["""num_inference_steps"""]
a__ = inputs["""output_type"""]
if "image" in inputs:
a__ = inputs["""image"""]
else:
a__ = None
if "mask_image" in inputs:
a__ = inputs["""mask_image"""]
else:
a__ = None
if "original_image" in inputs:
a__ = inputs["""original_image"""]
else:
a__ = None
a__ , a__ = pipe.encode_prompt(a_ )
# inputs with prompt converted to embeddings
a__ = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
a__ = image
if mask_image is not None:
a__ = mask_image
if original_image is not None:
a__ = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(a_ , a_ , a_ )
a__ = pipe(**a_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(a_ )
a__ = self.pipeline_class.from_pretrained(a_ )
pipe_loaded.to(a_ )
pipe_loaded.set_progress_bar_config(disable=a_ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(a_ , a_ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
a__ = self.get_dummy_inputs(a_ )
a__ = inputs["""generator"""]
a__ = inputs["""num_inference_steps"""]
a__ = inputs["""output_type"""]
# inputs with prompt converted to embeddings
a__ = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
a__ = image
if mask_image is not None:
a__ = mask_image
if original_image is not None:
a__ = original_image
a__ = pipe_loaded(**a_ )[0]
a__ = np.abs(to_np(a_ ) - to_np(a_ ) ).max()
self.assertLess(a_ , 1E-4 )
def _a ( self ):
a__ = self.get_dummy_components()
a__ = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
a__ = self.get_dummy_inputs(a_ )
a__ = pipe(**a_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(a_ )
a__ = self.pipeline_class.from_pretrained(a_ )
pipe_loaded.to(a_ )
pipe_loaded.set_progress_bar_config(disable=a_ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
a__ = self.get_dummy_inputs(a_ )
a__ = pipe_loaded(**a_ )[0]
a__ = np.abs(to_np(a_ ) - to_np(a_ ) ).max()
self.assertLess(a_ , 1E-4 )
| 351 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = False, False, False
@dataclass
class Audio:
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value):
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, if you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM bytes, we don't have to read the file again (just use them!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32_767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32_767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
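    # Worked example for the PCM branch in encode_example above (illustrative
    # numbers): a 16-bit sample of 16_384 maps to 16_384 / 32_767 ≈ 0.5 as
    # float32, so the raw int16 range [-32_768, 32_767] lands (almost exactly)
    # in [-1.0, 1.0] before being re-encoded as WAV.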
    def decode_example(self, value, token_per_repo_id=None):
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)
        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self):
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage):
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage):
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
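# A minimal round-trip sketch for the feature above (illustrative only; it
# assumes the optional 'soundfile' and 'librosa' dependencies are installed
# and is not part of the module itself):
#
#     import numpy as np
#     audio = Audio(sampling_rate=16_000, mono=True)
#     encoded = audio.encode_example({"array": np.zeros(16_000, dtype=np.float32), "sampling_rate": 16_000})
#     decoded = audio.decode_example(encoded)
#     assert decoded["sampling_rate"] == 16_000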
| 351 | 1 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []

no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}

payload = [
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": F"""🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results""",
"""emoji""": True,
},
}
]
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f"{line['duration']:.4f}"
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
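# The loop above expects one JSON object per line, in the shape emitted by
# pytest's report-log output; an illustrative (made-up) line:
#
#     {"nodeid": "tests/test_metrics.py::test_accuracy", "duration": 0.0421, "outcome": "failed"}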
_lowerCAmelCase = """"""
_lowerCAmelCase = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
table,
headers=["""Test Location""", """Num Failed"""],
tablefmt=hf_table_format,
stralign="""right""",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
_lowerCAmelCase = """Too many failed tests, please see the full report in the Action results."""
_lowerCAmelCase = len(err) + 10
_lowerCAmelCase = message[: 3000 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
_lowerCAmelCase = """No failed tests! 🤗"""
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
        md_report = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
    action_button = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": F"""https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
payload.append(action_button)
    date_report = {
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": F"""Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}""",
}
],
}
payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
_lowerCAmelCase = """"""
for i, row in enumerate(test_failures):
if row[0] != test_class:
_lowerCAmelCase = row[0]
else:
_lowerCAmelCase = """"""
_lowerCAmelCase = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```""",
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
            )
| 137 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 137 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
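# For intuition: local SGD runs K ordinary optimizer steps per worker without
# cross-worker synchronization, then averages the model parameters across
# workers. A framework-agnostic sketch of that averaging step (an assumption
# for illustration only, not the LocalSGD API used below):
#
#     with torch.no_grad():
#         world_size = torch.distributed.get_world_size()
#         for param in model.parameters():
#             torch.distributed.all_reduce(param.data, op=torch.distributed.ReduceOp.SUM)
#             param.data /= world_size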
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt"
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs)
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 713 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 343 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm1.weight""", F"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm1.bias""", F"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.weight""", F"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.bias""", F"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm2.weight""", F"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm2.bias""", F"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.weight""", F"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.bias""", F"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc2.weight""", F"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.mlp.fc2.bias""", F"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
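# For illustration, rename_key mutates the state dict in place:
#
#     sd = {"encoder.deit.norm.weight": 1}
#     rename_key(sd, "encoder.deit.norm.weight", "encoder.layernorm.weight")
#     assert sd == {"encoder.layernorm.weight": 1}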
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 152 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""module.blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""module.blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""module.blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""module.blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""module.blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
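# Shape sanity check for the split above (hypothetical sizes): with
# hidden_size H, the fused qkv weight has shape (3H, H) and the bias (3H,).
# Rows [0:H] feed the query projection, [H:2H] the key projection, and
# [2H:3H] (written as -H: above) the value projection.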
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    ignore_keys = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 619 | 0 |
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
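# A hedged usage sketch for the checker above (hypothetical shapes;
# `clip_input` would come from a CLIP image processor and `images` is a
# batch of generated images as numpy arrays):
#
#     checker = IFSafetyChecker(clip_config)
#     images, nsfw_flags, watermark_flags = checker(clip_input, images)
#     # flagged entries in `images` are replaced with black (all-zero) arrays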
| 190 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
),
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
),
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
),
"bert-base-multilingual-cased": (
"https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
),
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-cased": (
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"bert-base-uncased": 5_12,
"bert-large-uncased": 5_12,
"bert-base-cased": 5_12,
"bert-large-cased": 5_12,
"bert-base-multilingual-uncased": 5_12,
"bert-base-multilingual-cased": 5_12,
"bert-base-chinese": 5_12,
"bert-base-german-cased": 5_12,
"bert-large-uncased-whole-word-masking": 5_12,
"bert-large-cased-whole-word-masking": 5_12,
"bert-large-uncased-whole-word-masking-finetuned-squad": 5_12,
"bert-large-cased-whole-word-masking-finetuned-squad": 5_12,
"bert-base-cased-finetuned-mrpc": 5_12,
"bert-base-german-dbmdz-cased": 5_12,
"bert-base-german-dbmdz-uncased": 5_12,
"TurkuNLP/bert-base-finnish-cased-v1": 5_12,
"TurkuNLP/bert-base-finnish-uncased-v1": 5_12,
"wietsedv/bert-base-dutch-cased": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
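    # Illustrative usage of the special-token helpers above ("bert-base-uncased"
    # is one of the checkpoints mapped earlier; output layout shown, not an
    # executed result):
    #
    #     tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
    #     ids_a = tok.convert_tokens_to_ids(tok.tokenize("hello"))
    #     ids_b = tok.convert_tokens_to_ids(tok.tokenize("world"))
    #     tok.build_inputs_with_special_tokens(ids_a, ids_b)
    #     # -> ids for: [CLS] hello [SEP] world [SEP]
    #     tok.create_token_type_ids_from_sequences(ids_a, ids_b)
    #     # -> [0, 0, 0, 1, 1]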
| 190 | 1 |