code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
def __get__( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any=None ) -> str:
"""simple docstring"""
if obj is None:
return self
if self.fget is None:
raise AttributeError("""unreadable attribute""" )
__magic_name__ = """__cached_""" + self.fget.__name__
__magic_name__ = getattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if cached is None:
__magic_name__ = self.fget(UpperCamelCase__ )
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return cached
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'''invalid truth value {val!r}''' )
def a__ ( A_ ):
'''simple docstring'''
if is_torch_fx_proxy(A_ ):
return True
if is_torch_available():
import torch
if isinstance(A_, torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(A_, tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(A_, (jnp.ndarray, Tracer) ):
return True
return isinstance(A_, np.ndarray )
def a__ ( A_ ):
'''simple docstring'''
return isinstance(A_, np.ndarray )
def a__ ( A_ ):
'''simple docstring'''
return _is_numpy(A_ )
def a__ ( A_ ):
'''simple docstring'''
import torch
return isinstance(A_, torch.Tensor )
def a__ ( A_ ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch(A_ )
def a__ ( A_ ):
'''simple docstring'''
import torch
return isinstance(A_, torch.device )
def a__ ( A_ ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch_device(A_ )
def a__ ( A_ ):
'''simple docstring'''
import torch
if isinstance(A_, A_ ):
if hasattr(A_, A_ ):
__magic_name__ = getattr(A_, A_ )
else:
return False
return isinstance(A_, torch.dtype )
def a__ ( A_ ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch_dtype(A_ )
def a__ ( A_ ):
'''simple docstring'''
import tensorflow as tf
return isinstance(A_, tf.Tensor )
def a__ ( A_ ):
'''simple docstring'''
return False if not is_tf_available() else _is_tensorflow(A_ )
def a__ ( A_ ):
'''simple docstring'''
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(A_, """is_symbolic_tensor""" ):
return tf.is_symbolic_tensor(A_ )
return type(A_ ) == tf.Tensor
def a__ ( A_ ):
'''simple docstring'''
return False if not is_tf_available() else _is_tf_symbolic_tensor(A_ )
def a__ ( A_ ):
'''simple docstring'''
import jax.numpy as jnp # noqa: F811
return isinstance(A_, jnp.ndarray )
def a__ ( A_ ):
'''simple docstring'''
return False if not is_flax_available() else _is_jax(A_ )
def a__ ( A_ ):
'''simple docstring'''
if isinstance(A_, (dict, UserDict) ):
return {k: to_py_obj(A_ ) for k, v in obj.items()}
elif isinstance(A_, (list, tuple) ):
return [to_py_obj(A_ ) for o in obj]
elif is_tf_tensor(A_ ):
return obj.numpy().tolist()
elif is_torch_tensor(A_ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(A_ ):
return np.asarray(A_ ).tolist()
elif isinstance(A_, (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def a__ ( A_ ):
'''simple docstring'''
if isinstance(A_, (dict, UserDict) ):
return {k: to_numpy(A_ ) for k, v in obj.items()}
elif isinstance(A_, (list, tuple) ):
return np.array(A_ )
elif is_tf_tensor(A_ ):
return obj.numpy()
elif is_torch_tensor(A_ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(A_ ):
return np.asarray(A_ )
else:
return obj
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
def _lowercase ( self : List[Any] ) -> str:
"""simple docstring"""
__magic_name__ = fields(self )
# Safety and consistency checks
if not len(UpperCamelCase__ ):
raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )
__magic_name__ = getattr(self , class_fields[0].name )
__magic_name__ = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(UpperCamelCase__ ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
__magic_name__ = first_field.items()
__magic_name__ = True
else:
try:
__magic_name__ = iter(UpperCamelCase__ )
__magic_name__ = True
except TypeError:
__magic_name__ = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(UpperCamelCase__ ):
if (
not isinstance(UpperCamelCase__ , (list, tuple) )
or not len(UpperCamelCase__ ) == 2
or not isinstance(element[0] , UpperCamelCase__ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
__magic_name__ = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
__magic_name__ = element[1]
elif first_field is not None:
__magic_name__ = first_field
else:
for field in class_fields:
__magic_name__ = getattr(self , field.name )
if v is not None:
__magic_name__ = v
def __delitem__( self : Any , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Any ) -> Optional[Any]:
"""simple docstring"""
raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def _lowercase ( self : Optional[Any] , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Union[str, Any] ) -> str:
"""simple docstring"""
raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def _lowercase ( self : Any , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def _lowercase ( self : List[str] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Optional[int] ) -> Tuple:
"""simple docstring"""
raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__( self : Any , UpperCamelCase__ : Optional[Any] ) -> Any:
"""simple docstring"""
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
__magic_name__ = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : str ) -> Optional[Any]:
"""simple docstring"""
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(UpperCamelCase__ , UpperCamelCase__ )
super().__setattr__(UpperCamelCase__ , UpperCamelCase__ )
def __setitem__( self : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : int ) -> int:
"""simple docstring"""
super().__setitem__(UpperCamelCase__ , UpperCamelCase__ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self : str ) -> Tuple[Any]:
"""simple docstring"""
return tuple(self[k] for k in self.keys() )
class UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
@classmethod
def _lowercase ( cls : Union[str, Any] , UpperCamelCase__ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
raise ValueError(
F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' )
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = """longest"""
a__ = """max_length"""
a__ = """do_not_pad"""
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = """pt"""
a__ = """tf"""
a__ = """np"""
a__ = """jax"""
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase__ : List[ContextManager] ) -> Any:
"""simple docstring"""
__magic_name__ = context_managers
__magic_name__ = ExitStack()
def __enter__( self : Optional[int] ) -> int:
"""simple docstring"""
for context_manager in self.context_managers:
self.stack.enter_context(UpperCamelCase__ )
def __exit__( self : Dict , *UpperCamelCase__ : str , **UpperCamelCase__ : Optional[int] ) -> str:
"""simple docstring"""
self.stack.__exit__(*UpperCamelCase__ , **UpperCamelCase__ )
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = infer_framework(A_ )
if framework == "tf":
__magic_name__ = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__magic_name__ = inspect.signature(model_class.forward ) # PyTorch models
else:
__magic_name__ = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = model_class.__name__
__magic_name__ = infer_framework(A_ )
if framework == "tf":
__magic_name__ = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__magic_name__ = inspect.signature(model_class.forward ) # PyTorch models
else:
__magic_name__ = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def a__ ( A_, A_ = "", A_ = "." ):
'''simple docstring'''
def _flatten_dict(A_, A_="", A_="." ):
for k, v in d.items():
__magic_name__ = str(A_ ) + delimiter + str(A_ ) if parent_key else k
if v and isinstance(A_, A_ ):
yield from flatten_dict(A_, A_, delimiter=A_ ).items()
else:
yield key, v
return dict(_flatten_dict(A_, A_, A_ ) )
@contextmanager
def a__ ( A_, A_ = False ):
'''simple docstring'''
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def a__ ( A_, A_=None ):
'''simple docstring'''
if is_numpy_array(A_ ):
return np.transpose(A_, axes=A_ )
elif is_torch_tensor(A_ ):
return array.T if axes is None else array.permute(*A_ )
elif is_tf_tensor(A_ ):
import tensorflow as tf
return tf.transpose(A_, perm=A_ )
elif is_jax_tensor(A_ ):
return jnp.transpose(A_, axes=A_ )
else:
raise ValueError(f'''Type not supported for transpose: {type(A_ )}.''' )
def a__ ( A_, A_ ):
'''simple docstring'''
if is_numpy_array(A_ ):
return np.reshape(A_, A_ )
elif is_torch_tensor(A_ ):
return array.reshape(*A_ )
elif is_tf_tensor(A_ ):
import tensorflow as tf
return tf.reshape(A_, A_ )
elif is_jax_tensor(A_ ):
return jnp.reshape(A_, A_ )
else:
raise ValueError(f'''Type not supported for reshape: {type(A_ )}.''' )
def a__ ( A_, A_=None ):
'''simple docstring'''
if is_numpy_array(A_ ):
return np.squeeze(A_, axis=A_ )
elif is_torch_tensor(A_ ):
return array.squeeze() if axis is None else array.squeeze(dim=A_ )
elif is_tf_tensor(A_ ):
import tensorflow as tf
return tf.squeeze(A_, axis=A_ )
elif is_jax_tensor(A_ ):
return jnp.squeeze(A_, axis=A_ )
else:
raise ValueError(f'''Type not supported for squeeze: {type(A_ )}.''' )
def a__ ( A_, A_ ):
'''simple docstring'''
if is_numpy_array(A_ ):
return np.expand_dims(A_, A_ )
elif is_torch_tensor(A_ ):
return array.unsqueeze(dim=A_ )
elif is_tf_tensor(A_ ):
import tensorflow as tf
return tf.expand_dims(A_, axis=A_ )
elif is_jax_tensor(A_ ):
return jnp.expand_dims(A_, axis=A_ )
else:
raise ValueError(f'''Type not supported for expand_dims: {type(A_ )}.''' )
def a__ ( A_ ):
'''simple docstring'''
if is_numpy_array(A_ ):
return np.size(A_ )
elif is_torch_tensor(A_ ):
return array.numel()
elif is_tf_tensor(A_ ):
import tensorflow as tf
return tf.size(A_ )
elif is_jax_tensor(A_ ):
return array.size
else:
raise ValueError(f'''Type not supported for expand_dims: {type(A_ )}.''' )
def a__ ( A_, A_ ):
'''simple docstring'''
for key, value in auto_map.items():
if isinstance(A_, (tuple, list) ):
__magic_name__ = [f'''{repo_id}--{v}''' if (v is not None and """--""" not in v) else v for v in value]
elif value is not None and "--" not in value:
__magic_name__ = f'''{repo_id}--{value}'''
return auto_map
def a__ ( A_ ):
'''simple docstring'''
for base_class in inspect.getmro(A_ ):
__magic_name__ = base_class.__module__
__magic_name__ = base_class.__name__
if module.startswith("""tensorflow""" ) or module.startswith("""keras""" ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("""torch""" ) or name == "PreTrainedModel":
return "pt"
elif module.startswith("""flax""" ) or module.startswith("""jax""" ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'''Could not infer framework from class {model_class}.''' )
| 88 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : int = {}
SCREAMING_SNAKE_CASE : Any = tokenizer(example["content"] , truncation=_a)["input_ids"]
SCREAMING_SNAKE_CASE : Dict = len(example["content"]) / len(output["input_ids"])
return output
a_ = HfArgumentParser(PretokenizationArguments)
a_ = parser.parse_args()
if args.num_workers is None:
a_ = multiprocessing.cpu_count()
a_ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
a_ = time.time()
a_ = load_dataset(args.dataset_name, split='train')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
a_ = time.time()
a_ = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
a_ = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''') | 76 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase =tempfile.mkdtemp()
__lowercase =BlipImageProcessor()
__lowercase =GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model')
__lowercase =BlipaProcessor(_lowerCAmelCase , _lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
def __lowerCamelCase ( self : Tuple , **_lowerCAmelCase : int):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase).tokenizer
def __lowerCamelCase ( self : Union[str, Any] , **_lowerCAmelCase : str):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase).image_processor
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__lowercase =[np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta)]
__lowercase =[Image.fromarray(np.moveaxis(_lowerCAmelCase , 0 , -1)) for x in image_inputs]
return image_inputs
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
__lowercase =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
__lowercase =self.get_image_processor(do_normalize=_lowerCAmelCase , padding_value=1.0)
__lowercase =BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_lowerCAmelCase , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , _lowerCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _lowerCAmelCase)
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__lowercase =self.get_image_processor()
__lowercase =self.get_tokenizer()
__lowercase =BlipaProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase)
__lowercase =self.prepare_image_inputs()
__lowercase =image_processor(_lowerCAmelCase , return_tensors='np')
__lowercase =processor(images=_lowerCAmelCase , return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase =self.get_image_processor()
__lowercase =self.get_tokenizer()
__lowercase =BlipaProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase)
__lowercase ='lower newer'
__lowercase =processor(text=_lowerCAmelCase)
__lowercase =tokenizer(_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =self.get_image_processor()
__lowercase =self.get_tokenizer()
__lowercase =BlipaProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase)
__lowercase ='lower newer'
__lowercase =self.prepare_image_inputs()
__lowercase =processor(text=_lowerCAmelCase , images=_lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
# test if it raises when no input is passed
with pytest.raises(_lowerCAmelCase):
processor()
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =self.get_image_processor()
__lowercase =self.get_tokenizer()
__lowercase =BlipaProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase)
__lowercase =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowercase =processor.batch_decode(_lowerCAmelCase)
__lowercase =tokenizer.batch_decode(_lowerCAmelCase)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase)
def __lowerCamelCase ( self : Any):
'''simple docstring'''
__lowercase =self.get_image_processor()
__lowercase =self.get_tokenizer()
__lowercase =BlipaProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase)
__lowercase ='lower newer'
__lowercase =self.prepare_image_inputs()
__lowercase =processor(text=_lowerCAmelCase , images=_lowerCAmelCase)
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
| 364 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
lowerCamelCase = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
for attribute in key.split('.' ):
__lowercase =getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
__lowercase =getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
__lowercase =hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
__lowercase =value
elif weight_type == "weight_g":
__lowercase =value
elif weight_type == "weight_v":
__lowercase =value
elif weight_type == "bias":
__lowercase =value
else:
__lowercase =value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__lowercase =[]
__lowercase =fairseq_model.state_dict()
__lowercase =hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
__lowercase =None
for name, value in fairseq_dict.items():
__lowercase =False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
__lowercase =True
elif name.split('.' )[0] == "proj":
__lowercase =fairseq_model.proj
__lowercase =True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__lowercase =True
if "*" in mapped_key:
__lowercase =name.split(_lowerCAmelCase )[0].split('.' )[-2]
__lowercase =mapped_key.replace('*' , _lowerCAmelCase )
if "weight_g" in name:
__lowercase ='weight_g'
elif "weight_v" in name:
__lowercase ='weight_v'
elif "bias" in name:
__lowercase ='bias'
elif "weight" in name:
__lowercase ='weight'
else:
__lowercase =None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
return proj_weight
def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__lowercase =full_name.split('conv_layers.' )[-1]
__lowercase =name.split('.' )
__lowercase =int(items[0] )
__lowercase =int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__lowercase =value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__lowercase =value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__lowercase =value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__lowercase =value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCAmelCase )
def _A ( _lowerCAmelCase ):
"""simple docstring"""
__lowercase , __lowercase =emb.weight.shape
__lowercase =nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase )
__lowercase =emb.weight.data
return lin_layer
def _A ( _lowerCAmelCase ):
"""simple docstring"""
with open(_lowerCAmelCase , 'r' , encoding='utf-8' ) as f:
__lowercase =f.readlines()
__lowercase =[line.split(' ' )[0] for line in lines]
__lowercase =len(_lowerCAmelCase )
__lowercase ={
'<s>': 0,
'<pad>': 1,
'</s>': 2,
'<unk>': 3,
}
vocab_dict.update(dict(zip(_lowerCAmelCase , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
"""simple docstring"""
__lowercase =WavaVecaConfig.from_pretrained(_lowerCAmelCase )
__lowercase =SpeechaTextaConfig.from_pretrained(
_lowerCAmelCase , vocab_size=_lowerCAmelCase , decoder_layers=_lowerCAmelCase , do_stable_layer_norm=_lowerCAmelCase )
__lowercase =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , )
__lowercase , __lowercase , __lowercase =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
__lowercase =model[0].eval()
# set weights for wav2vec2 encoder
__lowercase =WavaVecaModel(_lowerCAmelCase )
__lowercase =recursively_load_weights_wavaveca(model.encoder , _lowerCAmelCase )
__lowercase =SpeechaTextaForCausalLM(_lowerCAmelCase )
__lowercase , __lowercase =hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowerCAmelCase )
# set output linear layer
unexpected_keys.remove('embed_out' )
__lowercase =nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
__lowercase =SpeechEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase )
__lowercase =False
# add projection layer
__lowercase =nn.Parameter(projection_layer.weight )
__lowercase =nn.Parameter(projection_layer.bias )
__lowercase =create_vocab_dict(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , 'vocab.json' ) , 'w' ) as fp:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
__lowercase =SpeechaTextaTokenizer(os.path.join(_lowerCAmelCase , 'vocab.json' ) )
tokenizer.save_pretrained(_lowerCAmelCase )
__lowercase =hf_wavavec.config.to_dict()
__lowercase =tokenizer.pad_token_id
__lowercase =tokenizer.bos_token_id
__lowercase =tokenizer.eos_token_id
__lowercase ='speech_to_text_2'
__lowercase ='wav2vec2'
__lowercase =SpeechEncoderDecoderConfig.from_dict(_lowerCAmelCase )
hf_wavavec.save_pretrained(_lowerCAmelCase )
feature_extractor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=1_0224, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
lowerCamelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 48 | 0 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def snake_case_ ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any]=10 ):
__lowercase : List[Any] = []
for _ in range(lowerCAmelCase_ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def snake_case_ ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int=10 ):
__lowercase : List[str] = []
for step in range(lowerCAmelCase_ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase : Any = os.path.join(lowerCAmelCase_ , """schedule.bin""" )
torch.save(scheduler.state_dict() , lowerCAmelCase_ )
__lowercase : Dict = torch.load(lowerCAmelCase_ )
scheduler.load_state_dict(lowerCAmelCase_ )
return lrs
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] , __a : Union[str, Any] , __a : Optional[Any] , __a : Any ) -> Tuple:
"""simple docstring"""
self.assertEqual(len(__a ) , len(__a ) )
for a, b in zip(__a , __a ):
self.assertAlmostEqual(__a , __a , delta=__a )
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase : Optional[Any] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=__a )
__lowercase : List[str] = torch.tensor([0.4, 0.2, -0.5] )
__lowercase : Union[str, Any] = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
__lowercase : Optional[Any] = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(100 ):
__lowercase : str = criterion(__a , __a )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase : Any = torch.tensor([0.1, -0.2, -0.1] , requires_grad=__a )
__lowercase : str = torch.tensor([0.4, 0.2, -0.5] )
__lowercase : str = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
__lowercase : Optional[Any] = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=__a , weight_decay=0.0 , relative_step=__a , scale_parameter=__a , warmup_init=__a , )
for _ in range(1000 ):
__lowercase : int = criterion(__a , __a )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class lowerCAmelCase(unittest.TestCase):
    """Tests that every LR scheduler produces the expected LR curve and
    survives a state_dict save/reload round-trip."""

    # Class-level fixtures; guarded so collection works without torch installed.
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        """Element-wise almost-equality for two equal-length float lists."""
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        """Check each scheduler's LR trajectory and its save/reload stability."""
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    """Wraps a schedule lambda in a picklable callable object.

    LambdaLR schedules built from local lambdas cannot be pickled; wrapping
    each lambda in an instance of this class makes torch.save of the
    scheduler's state possible (see wrap_scheduler caller above).
    """

    def __init__(self, fn):
        # Bug fix: the original bound the callable to a throwaway local
        # instead of the instance attribute that __call__ reads.
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        """Replace every lr_lambda on *scheduler* with a wrapped, picklable copy."""
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
lowerCamelCase : Dict = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich | 233 | 1 |
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    """Histogram-based constant stretch (equalization) of a grayscale image.

    ``stretch`` builds the cumulative distribution of pixel intensities and
    remaps every pixel through the resulting lookup table.
    """

    def __init__(self):
        self.img = ""                # grayscale image array (set by stretch)
        self.original_image = ""     # untouched copy for side-by-side display
        self.last_list = []          # per-intensity remap lookup table
        self.rem = 0                 # rounding remainder carried between bins
        self.L = 256                 # number of gray levels
        self.sk = 0                  # running cumulative probability
        self.k = 0                   # total pixel count (histogram sum)
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        """Equalize *input_image* (a file path) and write output_data/output.jpg."""
        self.img = cva.imread(input_image, 0)  # 0 -> load as grayscale
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                # NOTE(review): `last % last` is always 0 — kept as-is from the
                # upstream algorithm, but looks like it should be a real
                # remainder; confirm intended rounding behaviour.
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        # Remap every pixel through the computed lookup table.
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        """Plot the histogram of the (stretched) image."""
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        """Display the stretched and original images side by side for 5 s."""
        cva.imshow("Output-Image", self.img)
        cva.imshow("Input-Image", self.original_image)
        cva.waitKey(5_0_0_0)
        cva.destroyAllWindows()
if __name__ == "__main__":
    # Resolve the sample image relative to this script's own directory
    # (the original used os.path.basename, which builds a nonexistent path).
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
'''simple docstring'''
lowerCamelCase_ = 'Tobias Carryer'
from time import time
class LinearCongruentialGenerator:
    """Linear congruential pseudo-random generator:
    seed_{n+1} = (multiplier * seed_n + increment) % modulo.
    """

    def __init__(self, multiplier, increment, modulo, seed=None):
        # seed=None defaults to the current time at *call* time; the original
        # `seed=int(time())` default was evaluated once at import (flake8 B008).
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = int(time()) if seed is None else seed

    def next_number(self):
        """Advance the generator state and return the next pseudo-random value."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
    # Show the LCG in action (classic numerical-recipes constants, modulo 2^32).
    lcg = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
    while True:
        print(lcg.next_number())
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = "▁"

# Names of the on-disk vocabulary artifacts (referenced by the tokenizer
# class attributes and save_vocabulary below).
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 10_24}
class _snake_case(PreTrainedTokenizer):
    """BartPho (syllable-level) tokenizer backed by SentencePiece plus a
    reduced monolingual fairseq vocabulary.

    Token ids are resolved through the fairseq vocab built in ``__init__``;
    the SentencePiece model is only used for segmentation (``_tokenize``).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        # SentencePieceProcessor objects are not picklable; serialize the proto.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add <s> ... </s> (and </s></s> pair separator) around the sequence(s)."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a 1/0 mask marking special tokens in the formatted sequence."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """BartPho (like BART) uses all-zero token type ids."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Map a token to its id via the reduced fairseq vocab (unk fallback)."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Join pieces and turn SentencePiece boundary markers back into spaces."""
        out_string = "".join(tokens).replace("▁", " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the SentencePiece model and the monolingual dict to *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
| 94 |
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy fractional-knapsack: maximize profit under a weight limit.

    Items are taken in decreasing profit/weight order; the last item may be
    taken fractionally. Returns the total profit gained.

    Raises:
        ValueError: if the lists differ in length, max_weight <= 0, or any
            profit/weight is negative.
    """
    if len(profit) != len(weight):
        raise ValueError('''The length of profit and weight must be same.''')
    if max_weight <= 0:
        raise ValueError('''max_weight must greater than zero.''')
    if any(p < 0 for p in profit):
        raise ValueError('''Profit can not be negative.''')
    if any(w < 0 for w in weight):
        raise ValueError('''Weight can not be negative.''')

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight do not reach max limit e.g. 15 kg and till i<length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        # Mark as consumed so duplicates resolve to the next occurrence.
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
    print(
        '''Input profits, weights, and then max_weight (all positive ints) separated by '''
        '''spaces.'''
    )

    # Read the three inputs the greedy knapsack needs from stdin.
    profit = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
    weight = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
    max_weight = int(input('''Max weight allowed: '''))

    # Function Call
    calc_profit(profit, weight, max_weight)
| 94 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of model-id -> config URL for the pretrained ViT checkpoints.
VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    """Configuration for a ViT (Vision Transformer) model.

    Stores the architecture hyper-parameters; defaults reproduce the
    vit-base-patch16-224 architecture.
    """

    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig(OnnxConfig):
    """ONNX export configuration for ViT models."""

    # Minimum torch version that supports the operators this export needs.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        """Dynamic-axis spec for the single pixel_values input."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        """Absolute tolerance used when validating the exported model."""
        return 1e-4
| 201 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def _A ( __magic_name__ ): # picklable for multiprocessing
return x.sum()
def _A ( __magic_name__ ): # picklable for multiprocessing
return i + 1
@dataclass
class A:
    """Tiny fixture dataclass for the asdict tests below (used as A(x=..., y=...))."""

    x: int
    y: str
class lowerCAmelCase ( lowercase_ ):
    """Unit tests for datasets.utils.py_utils helpers (map_nested, zip_dict,
    temporary_assignment).

    NOTE(review): this block is machine-obfuscated and currently broken as
    written — the base class ``lowercase_`` is undefined (presumably
    ``unittest.TestCase``; confirm), all three methods share the name
    ``UpperCAmelCase`` so only the last one survives class creation, and the
    repeated ``lowercase__ = ...`` assignments shadow one another while the
    assertions reference the undefined name ``_lowercase``. Code preserved
    byte-for-byte; only comments/docstrings added.
    """
    def UpperCAmelCase ( self :Optional[Any] ):
        """Exercise map_nested over empty/scalar/list/dict/nested inputs,
        serially and (below) with num_proc and numpy mapping."""
        # Inputs — presumably s1..s8 in the original; each line rebinds the
        # same name, so only the last value survives as written.
        lowercase__ = {}
        lowercase__ = []
        lowercase__ = 1
        lowercase__ = [1, 2]
        lowercase__ = {"a": 1, "b": 2}
        lowercase__ = {"a": [1, 2], "b": [3, 4]}
        lowercase__ = {"a": {"1": 1}, "b": 2}
        lowercase__ = {"a": 1, "b": 2, "c": 3, "d": 4}
        # Expected outputs — each input with every leaf incremented by one.
        lowercase__ = {}
        lowercase__ = []
        lowercase__ = 2
        lowercase__ = [2, 3]
        lowercase__ = {"a": 2, "b": 3}
        lowercase__ = {"a": [2, 3], "b": [4, 5]}
        lowercase__ = {"a": {"1": 2}, "b": 3}
        lowercase__ = {"a": 2, "b": 3, "c": 4, "d": 5}
        # Serial map_nested over each input/expected pair.
        self.assertEqual(map_nested(_lowercase , _lowercase ) , _lowercase )
        self.assertEqual(map_nested(_lowercase , _lowercase ) , _lowercase )
        self.assertEqual(map_nested(_lowercase , _lowercase ) , _lowercase )
        self.assertEqual(map_nested(_lowercase , _lowercase ) , _lowercase )
        self.assertEqual(map_nested(_lowercase , _lowercase ) , _lowercase )
        self.assertEqual(map_nested(_lowercase , _lowercase ) , _lowercase )
        self.assertEqual(map_nested(_lowercase , _lowercase ) , _lowercase )
        self.assertEqual(map_nested(_lowercase , _lowercase ) , _lowercase )
        # Same pairs again, but mapped with multiprocessing (num_proc=2).
        lowercase__ = 2
        self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase ) , _lowercase )
        self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase ) , _lowercase )
        self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase ) , _lowercase )
        self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase ) , _lowercase )
        self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase ) , _lowercase )
        self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase ) , _lowercase )
        self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase ) , _lowercase )
        self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase ) , _lowercase )
        # NumPy handling: map over arrays with/without map_numpy, serial and parallel.
        lowercase__ = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
        lowercase__ = {"a": 2, "b": 0, "c": 2}
        lowercase__ = {
            "a": np.eye(2 ).astype(_lowercase ),
            "b": np.zeros(3 ).astype(_lowercase ),
            "c": np.ones(2 ).astype(_lowercase ),
        }
        self.assertEqual(map_nested(_lowercase , _lowercase , map_numpy=_lowercase ) , _lowercase )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(_lowercase , _lowercase , map_numpy=_lowercase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        self.assertEqual(map_nested(_lowercase , _lowercase , map_numpy=_lowercase , num_proc=_lowercase ) , _lowercase )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(_lowercase , _lowercase , map_numpy=_lowercase , num_proc=_lowercase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        # Local lambdas are not picklable, so a parallel map must raise.
        with self.assertRaises(_lowercase ): # can't pickle a local lambda
            map_nested(lambda _lowercase : x + 1 , _lowercase , num_proc=_lowercase )
    def UpperCAmelCase ( self :List[Any] ):
        """zip_dict should yield each key paired with the tuple of its values
        across the input dicts."""
        lowercase__ = {"a": 1, "b": 2}
        lowercase__ = {"a": 3, "b": 4}
        lowercase__ = {"a": 5, "b": 6}
        lowercase__ = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(_lowercase , _lowercase , _lowercase ) ) , _lowercase )
    def UpperCAmelCase ( self :Union[str, Any] ):
        """temporary_assignment should set an attribute inside the context and
        restore the original value on exit."""
        class lowerCAmelCase :
            # NOTE(review): obfuscated; presumably `my_attr = "bar"` on a class
            # named Foo — the assertions below read foo.my_attr.
            __lowerCamelCase = 'bar'
        lowercase__ = Foo()
        self.assertEqual(foo.my_attr , "bar" )
        with temporary_assignment(_lowercase , "my_attr" , "BAR" ):
            self.assertEqual(foo.my_attr , "BAR" )
        self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc",
    [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ],
)
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    """map_nested should only spawn a process pool when the iterable is at
    least parallel_min_length long and num_proc allows it; otherwise it must
    fall back to the serial _single_map_nested path."""
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            # The pool must be created with the effective process count.
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class lowerCAmelCase(TestCase):
    """Tests for temp_seed: seeding twice must reproduce the same random
    output, and a third unseeded draw must differ."""

    @require_tf
    def test_temp_seed_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_temp_seed_pytorch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data", [{}])
def test_nested_data_structure_data(input_data):
    """NestedDataStructure must expose the wrapped data unchanged via .data."""
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
    "data, expected_output",
    [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ],
)
def test_flatten(data, expected_output):
    """NestedDataStructure.flatten must produce a flat list of all leaves."""
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    """asdict must convert dataclasses to dicts, recurse into containers, and
    reject non-dataclass inputs."""
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    # Dataclasses nested inside dicts/lists are converted recursively.
    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _A ( __magic_name__ ):
return text.split()
def _A ( __magic_name__ ):
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def test_iflatmap_unordered():
    """iflatmap_unordered must flatten all yielded items, work with both
    multiprocessing and multiprocess pools, and stream items immediately."""
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
    ("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
    ("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
    ("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
    ("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
    ("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
    ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
    ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
    ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
    ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
    (
        "zero-shot-object-detection",
        "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
        "AutoModelForZeroShotObjectDetection",
    ),
    ("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
    ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
    ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
    ("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
    (
        "table-question-answering",
        "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForTableQuestionAnswering",
    ),
    ("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
    ("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
    (
        "next-sentence-prediction",
        "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
        "AutoModelForNextSentencePrediction",
    ),
    (
        "audio-frame-classification",
        "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
        "AutoModelForAudioFrameClassification",
    ),
    ("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
    (
        "document-question-answering",
        "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForDocumentQuestionAnswering",
    ),
    (
        "visual-question-answering",
        "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForVisualQuestionAnswering",
    ),
    ("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
    (
        "zero-shot-image-classification",
        "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
        "AutoModelForZeroShotImageClassification",
    ),
    ("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
    ("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
    ("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def camel_case_split(identifier):
    """Split a CamelCase identifier into its words (acronym runs kept whole,
    e.g. 'TFBertModel' -> ['TF', 'Bert', 'Model'])."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def get_frameworks_table():
    """Build a DataFrame listing, per model type, which frameworks (PT/TF/Flax)
    implement it and which auto processor class it uses."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]
    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    """Update `table` (model class name -> (pipeline_tag, auto_class)) from the
    `PIPELINE_TAGS_AND_AUTO_MODELS` constant, covering PT, TF and Flax.

    NOTE: the def name restores the name this function is already called by
    elsewhere in this file (see the call in `update_metadata`).

    Args:
        table: Existing mapping to update in place (also returned).

    Returns:
        The updated mapping.
    """
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f'TF_{model_mapping}', f'FLAX_{model_mapping}']
        auto_classes = [auto_class, f'TF_{auto_class}', f'Flax_{auto_class}']
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})
    return table
def update_metadata(token, commit_sha):
    """Build the frameworks and pipeline-tags metadata tables and push them to
    the `huggingface/transformers-metadata` dataset repo.

    NOTE: the original signature had two parameters with the same name (a
    SyntaxError); they are restored to the names used by the `__main__` caller.

    Args:
        token: Hub auth token used to download the current table and to push.
        commit_sha: SHA of the commit triggering this update, used in the
            commit message when provided.
    """
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"
        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    """Check every pipeline task in `SUPPORTED_TASKS` is covered by the
    `PIPELINE_TAGS_AND_AUTO_MODELS` constant; raise ValueError listing any
    missing tags.

    NOTE: the def name restores the name this function is already called by
    in the `__main__` block of this file.
    """
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            # A task may map to a tuple/list of model classes; take the first.
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            # Only report the tag if its auto class is not covered either.
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f'`utils/update_metadata.py`: {msg}. Please add them!'
        )
if __name__ == "__main__":
    # CLI entry point: either verify pipeline-tag coverage (--check-only) or
    # push refreshed metadata tables to the Hub.
    parser = argparse.ArgumentParser()
    parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
    parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
    parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
    args = parser.parse_args()

    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
| 18 | from functools import lru_cache
@lru_cache
def _snake_case(num: int) -> int:
    """Return num! (factorial of a non-negative integer).

    Memoized with `lru_cache`; computed iteratively so the function does not
    depend on its own (mangled) name for recursion.

    Args:
        num: Non-negative integer.

    Returns:
        num!

    Raises:
        ValueError: If `num` is negative.

    >>> _snake_case(5)
    120
    >>> _snake_case(0)
    1
    """
    if num < 0:
        raise ValueError("Number should not be negative.")
    result = 1
    for factor in range(2, num + 1):
        result *= factor
    return result
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 18 | 1 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase(SchedulerCommonTest):
    """Tests for `DDPMParallelScheduler`: config sweeps, reference variance
    values, batched stepping, full denoising loops and custom timesteps.

    The base class is restored to the imported `SchedulerCommonTest` (the
    original referenced an undefined name), and attribute/method names are
    restored to the names the test bodies actually read
    (`self.scheduler_classes`, `self.get_scheduler_config`, ...).
    """

    # Scheduler class(es) under test, consumed by the SchedulerCommonTest helpers.
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config dict, with `kwargs` overriding fields."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # Reference variances at the start, middle and end of the schedule.
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        # Three distinct deterministic samples (the obfuscated source collapsed
        # them into a single name, stacking the same tensor three times).
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(
            residual, timesteps.flatten(0, 1), samples.flatten(0, 1)
        )

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction')
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg='`custom_timesteps` must be in descending order.'):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.'):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}',
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 145 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase(SchedulerCommonTest):
    """Tests for `DDPMParallelScheduler`: config sweeps, reference variance
    values, batched stepping, full denoising loops and custom timesteps.

    The base class is restored to the imported `SchedulerCommonTest` (the
    original referenced an undefined name), and attribute/method names are
    restored to the names the test bodies actually read
    (`self.scheduler_classes`, `self.get_scheduler_config`, ...).
    """

    # Scheduler class(es) under test, consumed by the SchedulerCommonTest helpers.
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config dict, with `kwargs` overriding fields."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # Reference variances at the start, middle and end of the schedule.
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        # Three distinct deterministic samples (the obfuscated source collapsed
        # them into a single name, stacking the same tensor three times).
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(
            residual, timesteps.flatten(0, 1), samples.flatten(0, 1)
        )

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction')
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg='`custom_timesteps` must be in descending order.'):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.'):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}',
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 145 | 1 |
def __lowerCamelCase(a, b):
    """Return True if string `a` can be abbreviated to string `b`.

    An abbreviation is formed by upper-casing some lowercase letters of `a`
    and deleting the remaining lowercase letters; uppercase letters of `a`
    must all be matched.  Solved with a boolean DP table where dp[i][j] means
    the first `i` chars of `a` can produce the first `j` chars of `b`.

    NOTE: the original signature declared the same parameter name twice (a
    SyntaxError); parameter names are restored from the body's reads.

    Args:
        a: Source string (mixed case).
        b: Target abbreviation (uppercase).

    Returns:
        True if `a` can be abbreviated to `b`, else False.
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Upper-case a[i] to match b[j].
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # Delete a[i] (only lowercase letters may be deleted).
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 285 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
__A = logging.get_logger(__name__)
class snake_case(BaseImageProcessor):
    r"""
    Image processor that resizes (shortest edge), center-crops, rescales and
    normalizes images, and can post-process semantic-segmentation logits.

    The base class is restored to the imported `BaseImageProcessor` (the
    original referenced an undefined name); parameter and method names are
    restored from the bodies' reads and the helper calls in `preprocess`.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        # Store the configuration on the instance (the obfuscated original
        # assigned these to throwaway locals, leaving the processor unconfigured).
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` so its shortest edge matches `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` with the given per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured transforms to one or more images and return a
        `BatchFeature` of pixel values.  Per-call arguments override the
        instance configuration."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Convert model logits into per-image semantic segmentation maps,
        optionally resized to `target_sizes` via bilinear interpolation."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 217 | 0 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class _a(unittest.TestCase):
    """Nightly GPU integration test for the legacy ONNX Stable Diffusion
    inpainting pipeline.

    Property names are restored to the names the test body reads
    (`self.gpu_provider`, `self.gpu_options`); the original gave all three
    members the same mangled name, so only the last survived.
    """

    @property
    def gpu_provider(self):
        # ONNX Runtime execution-provider spec used to run on CUDA.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        # Disable the memory-pattern optimization for this session.
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy")

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 93 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# NOTE(review): every constant here is bound to the same name `lowerCAmelCase`,
# yet the converter below reads HIGHLIGHT_MESSAGE_PRE / TO_HIGHLIGHT /
# TO_CONVERT -- renaming artifact; verify.
# Conflict-style marker prepended to converted lines that need manual review.
lowerCAmelCase = '<<<<<<< This should probably be modified because it mentions: '
# Closing half of the conflict-style highlight marker.
lowerCAmelCase = '=======\n>>>>>>>\n'
# tfds identifiers with no `datasets` equivalent; mentioning one triggers a highlight.
lowerCAmelCase = [
    'TextEncoderConfig',
    'ByteTextEncoder',
    'SubwordTextEncoder',
    'encoder_config',
    'maybe_build_from_corpus',
    'manual_dir',
]
# Regex rewrites applied to every remaining line (tfds API -> datasets API).
lowerCAmelCase = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (R'tfds\.core', R'datasets'),
    (R'tf\.io\.gfile\.GFile', R'open'),
    (R'tf\.([\w\d]+)', R'datasets.Value(\'\1\')'),
    (R'tfds\.features\.Text\(\)', R'datasets.Value(\'string\')'),
    (R'tfds\.features\.Text\(', R'datasets.Value(\'string\'),'),
    (R'features\s*=\s*tfds.features.FeaturesDict\(', R'features=datasets.Features('),
    (R'tfds\.features\.FeaturesDict\(', R'dict('),
    (R'The TensorFlow Datasets Authors', R'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
    (R'tfds\.', R'datasets.'),
    (R'dl_manager\.manual_dir', R'self.config.data_dir'),
    (R'self\.builder_config', R'self.config'),
]
def _a ( SCREAMING_SNAKE_CASE ):
    """CLI factory: build the convert command from parsed argparse args.

    NOTE(review): the body reads `args` rather than the declared parameter,
    and `ConvertCommand` does not resolve under that name here (the class
    below is named `_a`) -- renaming artifacts; verify.
    """
    return ConvertCommand(args.tfds_path , args.datasets_directory )
class _a ( UpperCamelCase__ ):
    """`datasets-cli convert`: rewrite TensorFlow Datasets scripts into
    HuggingFace Datasets scripts via line-by-line regex substitution.

    NOTE(review): throughout this class values are bound to `lowercase__`
    while later statements read the intended identifiers (`train_parser`,
    `self._tfds_path`, `out_line`, ...), and `__init__` repeats the
    parameter name `UpperCamelCase_` (invalid Python) -- renaming artifacts;
    verify against the upstream `datasets` convert command.
    """

    @staticmethod
    def lowerCamelCase_ ( UpperCamelCase_: ArgumentParser ) -> int:
        """Register the `convert` sub-command with its --tfds_path and
        --datasets_directory arguments."""
        lowercase__ = parser.add_parser(
            '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
        train_parser.add_argument(
            '''--tfds_path''' , type=UpperCamelCase_ , required=UpperCamelCase_ , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
        train_parser.add_argument(
            '''--datasets_directory''' , type=UpperCamelCase_ , required=UpperCamelCase_ , help='''Path to the HuggingFace Datasets folder.''' )
        train_parser.set_defaults(func=UpperCamelCase_ )

    def __init__( self: Dict , UpperCamelCase_: str , UpperCamelCase_: str , *UpperCamelCase_: Union[str, Any] ) -> List[str]:
        """Store a logger, the tfds source path and the target directory."""
        lowercase__ = get_logger('''datasets-cli/converting''' )
        lowercase__ = tfds_path
        lowercase__ = datasets_directory

    def lowerCamelCase_ ( self: str ) -> Dict:
        """Convert every .py dataset script found at the source path, writing
        rewritten files (one directory per builder script) into the target
        directory, and copy shared utility files next to the builders that
        import them."""
        # Resolve the source: either a directory of scripts or a single file.
        if os.path.isdir(self._tfds_path ):
            lowercase__ = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            lowercase__ = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
        lowercase__ = os.path.abspath(self._datasets_directory )
        self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
        # Accumulators: files converted, shared utils, and util->builder-dir map.
        lowercase__ = []
        lowercase__ = []
        lowercase__ = {}
        if os.path.isdir(self._tfds_path ):
            lowercase__ = os.listdir(UpperCamelCase_ )
        else:
            lowercase__ = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(f'Looking at file {f_name}' )
            lowercase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
            lowercase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
            # Skip package files, tests and non-Python files.
            if not os.path.isfile(UpperCamelCase_ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''' )
                continue
            with open(UpperCamelCase_ , encoding='''utf-8''' ) as f:
                lowercase__ = f.readlines()
            lowercase__ = []
            lowercase__ = False
            lowercase__ = False
            lowercase__ = []
            for line in lines:
                lowercase__ = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    lowercase__ = '''import datasets\n'''
                elif "import tensorflow" in out_line:
                    # order is important here
                    lowercase__ = ''''''
                    continue
                elif "from absl import logging" in out_line:
                    lowercase__ = '''from datasets import logging\n'''
                elif "getLogger" in out_line:
                    lowercase__ = out_line.replace('''getLogger''' , '''get_logger''' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    # Line mentions a construct we cannot convert: wrap it in
                    # conflict-style markers so a human reviews it.
                    lowercase__ = True
                    lowercase__ = list(filter(lambda UpperCamelCase_ : e in out_line , UpperCamelCase_ ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(UpperCamelCase_ ) + '''\n''' )
                    out_lines.append(UpperCamelCase_ )
                    out_lines.append(UpperCamelCase_ )
                    continue
                else:
                    # Apply the ordered regex rewrites.
                    for pattern, replacement in TO_CONVERT:
                        lowercase__ = re.sub(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
                    # Take care of saving utilities (to later move them together with main script)
                    if "tensorflow_datasets" in out_line:
                        lowercase__ = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , UpperCamelCase_ )
                        tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
                        lowercase__ = '''from . import ''' + match.group(1 )
                    # Check we have not forget anything
                    if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                        raise ValueError(f'Error converting {out_line.strip()}' )
                    if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                        lowercase__ = True
                    out_lines.append(UpperCamelCase_ )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                lowercase__ = f_name.replace('''.py''' , '''''' )
                lowercase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
                lowercase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
                os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
                self._logger.info(f'Adding directory {output_dir}' )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(UpperCamelCase_ )
            if needs_manual_update:
                with_manual_update.append(UpperCamelCase_ )
            with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
                f.writelines(UpperCamelCase_ )
            self._logger.info(f'Converted in {output_file}' )
        # Copy each shared utility file next to the builder that imports it.
        for utils_file in utils_files:
            try:
                lowercase__ = os.path.basename(UpperCamelCase_ )
                lowercase__ = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
                self._logger.info(f'Moving {dest_folder} to {utils_file}' )
                shutil.copy(UpperCamelCase_ , UpperCamelCase_ )
            except KeyError:
                self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.' )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
| 93 | 1 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
# Official dance-diffusion checkpoints: download URL plus the audio sample
# rate (Hz) and sample size (frames) each model was trained with.
# NOTE(review): bound to `A_` but read elsewhere as MODELS_MAP -- renaming
# artifact; verify.
A_ = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 4_80_00,
        "sample_size": 6_55_36,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 4_80_00,
        "sample_size": 6_55_36,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 4_80_00,
        "sample_size": 13_10_72,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 1_60_00,
        "sample_size": 6_55_36,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 1_60_00,
        "sample_size": 6_55_36,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 1_60_00,
        "sample_size": 6_55_36,
    },
}
def A_ ( alpha , sigma ):
    """Map a noise level (alpha, sigma) to a diffusion timestep in [0, 1].

    t = atan2(sigma, alpha) / (pi / 2): the angle of the (alpha, sigma)
    point, rescaled so (1, 0) -> 0 (clean) and (0, 1) -> 1 (pure noise).

    Fixes: the original declared the same parameter name twice (a
    SyntaxError) and called the nonexistent ``torch.atana``; the intended
    function is ``torch.atan2``.
    """
    return torch.atan2(sigma , alpha ) / math.pi * 2
def A_ ( snake_case ):
    """Return the 'crash' noise schedule for timesteps `snake_case` in [0, 1].

    sigma = sin(t * pi/2)^2 and alpha = sqrt(1 - sigma^2); the pair is then
    converted back to a timestep via ``alpha_sigma_to_t``.

    Fixes: the original bound both intermediates to throwaway names and then
    read undefined ``sigma`` -- restored consistent local names.
    NOTE(review): ``alpha_sigma_to_t`` does not resolve in this file (the
    helper above is named ``A_``) -- renaming artifact; verify.
    """
    sigma = torch.sin(snake_case * math.pi / 2 ) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha , sigma )
class _snake_case ( _a ):
    """Empty subclass used as a plain attribute container (the conversion
    entry point attaches ``sample_size`` / ``sample_rate`` / ``latent_dim``
    attributes to an instance of this).

    NOTE(review): the base name `_a` resolves to an unrelated class earlier
    in this file -- presumably a renaming artifact; verify.
    """
    pass
class _snake_case ( nn.Module ):
    """Container module mirroring the training repo's layout (a diffusion
    U-Net with 4 attention layers, an EMA copy, and a Sobol sampler) so its
    checkpoints' ``state_dict`` keys line up for loading."""

    def __init__( self : int ,SCREAMING_SNAKE_CASE__ : str ):
        # NOTE(review): all three values are bound to throwaway
        # `SCREAMING_SNAKE_CASE` names; `orig_model.diffusion_ema` read later
        # in this file suggests they were `self.diffusion` /
        # `self.diffusion_ema` / `self.rng` originally -- verify.
        super().__init__()
        SCREAMING_SNAKE_CASE:List[Any] = DiffusionAttnUnetaD(SCREAMING_SNAKE_CASE__ ,n_attn_layers=4 )
        SCREAMING_SNAKE_CASE:List[str] = deepcopy(self.diffusion )
        SCREAMING_SNAKE_CASE:Dict = torch.quasirandom.SobolEngine(1 ,scramble=SCREAMING_SNAKE_CASE__ )
def A_ ( snake_case ):
    """Download the official checkpoint for model name `snake_case` into the
    current directory and return its local path ``./<name>.ckpt``.

    Fixes: the original read an undefined ``model_name`` and discarded the
    looked-up URL into a throwaway binding.
    NOTE(review): downloads via ``os.system`` + wget; safe only for the
    trusted, hard-coded MODELS_MAP entries -- never pass untrusted names.
    """
    url = MODELS_MAP[snake_case]["url"]
    os.system(F'''wget {url} ./''' )
    return F'''./{snake_case}.ckpt'''
# NOTE(review): every table below is bound to the same name `A_`, while the
# rename helpers read MID_NUM_TO_LAYER / DOWN_NUM_TO_LAYER / UP_NUM_TO_LAYER /
# DEPTH_0_TO_LAYER / RES_CONV_MAP / ATTN_MAP -- renaming artifact; verify.
# Sub-module index -> diffusers layer name inside a down block (apparently).
A_ = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
# Sub-module index -> diffusers layer name inside an up block (apparently).
A_ = {
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
# Sub-module index -> diffusers layer name for the mid block (both halves).
A_ = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
# Sub-module index -> diffusers layer name at depth 0 (resnet-only blocks).
A_ = {
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}
# ResConvBlock parameter prefix -> diffusers conv / group-norm name.
A_ = {
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}
# Attention parameter prefix -> diffusers name(s); qkv fans out to three keys.
A_ = {
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}
def A_ ( snake_case ):
    """Translate a ResConvBlock parameter name into diffusers naming.

    ``skip*`` maps through RES_CONV_MAP["skip"]; ``main.<digit>*`` maps its
    6-character prefix through RES_CONV_MAP; anything else raises ValueError.

    Fixes: the original body read an undefined ``name`` instead of its
    parameter.
    """
    if snake_case.startswith("skip" ):
        return snake_case.replace("skip" , RES_CONV_MAP["skip"] )
    # name has to be of format main.{digit}
    if not snake_case.startswith("main." ):
        raise ValueError(F'''ResConvBlock error with {snake_case}''' )
    return snake_case.replace(snake_case[:6] , RES_CONV_MAP[snake_case[:6]] )
def A_ ( snake_case ):
    """Translate an attention-block parameter name via ATTN_MAP.

    Returns a single renamed string for 1:1 entries, or a list of renamed
    strings when one source prefix fans out to several targets (the fused
    qkv projection). Raises ValueError when no prefix matches.

    Fixes: the original compared against an undefined ``name`` and passed
    its own parameter to ``isinstance``/``startswith``/``replace``.
    """
    for key, value in ATTN_MAP.items():
        if snake_case.startswith(key ) and not isinstance(value , list ):
            return snake_case.replace(key , value )
        elif snake_case.startswith(key ):
            return [snake_case.replace(key , v ) for v in value]
    raise ValueError(F'''Attn error with {snake_case}''' )
def A_ ( input_string , max_depth=13 ):
    """Rename one original dance-diffusion state-dict key to diffusers naming.

    Strips the ``net.`` / ``main.7.`` wrappers while counting nesting depth,
    then maps the leading layer index through the depth-appropriate table
    (mid / down / up / depth-0) and delegates the remainder to the resconv or
    attention renamers. Returns a string, or a list of strings when the key
    fans out (fused qkv).

    Fixes: the original signature declared ``snake_case`` twice (a
    SyntaxError) and the body read many undefined locals (``string``,
    ``layer_num``, ``string_left``, ``prefix``, ...); names restored.
    NOTE(review): the *_TO_LAYER tables and the two helpers do not resolve
    under these names in this file (renaming artifact) -- verify.
    """
    string = input_string
    # Timestep embedding is renamed directly.
    if string.split("." )[0] == "timestep_embed":
        return string.replace("timestep_embed" , "time_proj" )
    depth = 0
    if string.startswith("net.3." ):
        depth += 1
        string = string[6:]
    elif string.startswith("net." ):
        string = string[4:]
    while string.startswith("main.7." ):
        depth += 1
        string = string[7:]
    if string.startswith("main." ):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    # Pick the destination block from the nesting depth and layer index.
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num ) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = F'''down_blocks.{depth}'''
    elif depth > 0 and int(layer_num ) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = F'''up_blocks.{max_depth - depth - 1}'''
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = F'''up_blocks.{max_depth - 1}''' if int(layer_num ) > 3 else "down_blocks.0"
    if not string_left.startswith("." ):
        raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left )
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left )
        string_left = new_string_left
    if not isinstance(string_left , list ):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def A_ ( snake_case ):
    """Rename every entry of an original dance-diffusion state dict to
    diffusers naming.

    Skips untrainable ``*.kernel`` resampling buffers; when a key fans out
    to a list (fused qkv), delegates the Conv->Linear split to
    ``transform_conv_attns``; otherwise copies the tensor under its new key.

    Fixes: the original iterated over an undefined ``state_dict`` and
    dropped each renamed key/result into throwaway bindings.
    NOTE(review): ``rename`` / ``transform_conv_attns`` do not resolve under
    these names here (both helpers are called ``A_``) -- renaming artifact.
    """
    new_state_dict = {}
    for k, v in snake_case.items():
        if k.endswith("kernel" ):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k )
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k , list ):
            new_state_dict = transform_conv_attns(new_state_dict , new_k , v )
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def A_ ( new_state_dict , new_k , v ):
    """Store tensor ``v`` into ``new_state_dict`` under the key(s) ``new_k``,
    converting 1x1-Conv attention weights to Linear layout.

    A 3-D ``v`` is a conv weight: drop the trailing kernel axis. With three
    target keys, ``v`` is a fused qkv projection: split its first axis into
    three equal chunks, one per key. Returns the (mutated) dict.

    Fixes: the original declared the same parameter name three times (a
    SyntaxError) and bound every slice to throwaway names instead of
    assigning into the state dict.
    """
    if len(new_k ) == 1:
        if len(v.shape ) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3 ):
            if len(v.shape ) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def A_ ( snake_case ):
    """Entry point: convert a dance-diffusion checkpoint into a diffusers
    DanceDiffusionPipeline, verify the generated audio matches the original
    IPLMS sampler to within 1e-3, and optionally save the pipeline.

    NOTE(review): results are repeatedly bound to throwaway
    `SCREAMING_SNAKE_CASE` names while later statements read the intended
    identifiers (`model_name`, `config`, `diffusers_model`, `orig_model`,
    `renamed_state_dict`, `audio`, `generated`, ...) -- renaming artifact;
    verify against the upstream conversion script.
    """
    # Run on GPU when available.
    SCREAMING_SNAKE_CASE:Union[str, Any] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
    # Derive the model name from the checkpoint filename.
    SCREAMING_SNAKE_CASE:List[str] = args.model_path.split("/" )[-1].split("." )[0]
    # Not a local file: treat the argument as an official name and download it.
    if not os.path.isfile(args.model_path ):
        assert (
            model_name == args.model_path
        ), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
        SCREAMING_SNAKE_CASE:List[str] = download(snake_case )
    SCREAMING_SNAKE_CASE:List[str] = MODELS_MAP[model_name]["sample_rate"]
    SCREAMING_SNAKE_CASE:Tuple = MODELS_MAP[model_name]["sample_size"]
    # Minimal args container for the original model definition.
    SCREAMING_SNAKE_CASE:Union[str, Any] = Object()
    SCREAMING_SNAKE_CASE:int = sample_size
    SCREAMING_SNAKE_CASE:Any = sample_rate
    SCREAMING_SNAKE_CASE:List[str] = 0
    # Build the diffusers UNet and the original model, then port the weights.
    SCREAMING_SNAKE_CASE:Optional[Any] = UNetaDModel(sample_size=snake_case , sample_rate=snake_case )
    SCREAMING_SNAKE_CASE:Optional[Any] = diffusers_model.state_dict()
    SCREAMING_SNAKE_CASE:Optional[Any] = DiffusionUncond(snake_case )
    orig_model.load_state_dict(torch.load(args.model_path , map_location=snake_case )["state_dict"] )
    SCREAMING_SNAKE_CASE:Union[str, Any] = orig_model.diffusion_ema.eval()
    SCREAMING_SNAKE_CASE:Dict = orig_model.state_dict()
    SCREAMING_SNAKE_CASE:Union[str, Any] = rename_orig_weights(snake_case )
    # Both key sets must line up, modulo untrainable *.kernel buffers.
    SCREAMING_SNAKE_CASE:Dict = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
    SCREAMING_SNAKE_CASE:Dict = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
    assert len(snake_case ) == 0, F'''Problem with {renamed_minus_diffusers}'''
    assert all(k.endswith("kernel" ) for k in list(snake_case ) ), F'''Problem with {diffusers_minus_renamed}'''
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
        if key == "time_proj.weight":
            SCREAMING_SNAKE_CASE:Dict = value.squeeze()
        SCREAMING_SNAKE_CASE:Union[str, Any] = value
    diffusers_model.load_state_dict(snake_case )
    # Sample with both pipelines from the same seed and compare the audio.
    SCREAMING_SNAKE_CASE:int = 100
    SCREAMING_SNAKE_CASE:int = 33
    SCREAMING_SNAKE_CASE:Any = IPNDMScheduler(num_train_timesteps=snake_case )
    SCREAMING_SNAKE_CASE:str = torch.manual_seed(snake_case )
    SCREAMING_SNAKE_CASE:Union[str, Any] = torch.randn([1, 2, config.sample_size] , generator=snake_case ).to(snake_case )
    SCREAMING_SNAKE_CASE:int = torch.linspace(1 , 0 , steps + 1 , device=snake_case )[:-1]
    SCREAMING_SNAKE_CASE:List[Any] = get_crash_schedule(snake_case )
    SCREAMING_SNAKE_CASE:Union[str, Any] = DanceDiffusionPipeline(unet=snake_case , scheduler=snake_case )
    SCREAMING_SNAKE_CASE:Union[str, Any] = torch.manual_seed(33 )
    SCREAMING_SNAKE_CASE:Union[str, Any] = pipe(num_inference_steps=snake_case , generator=snake_case ).audios
    SCREAMING_SNAKE_CASE:Tuple = sampling.iplms_sample(snake_case , snake_case , snake_case , {} )
    SCREAMING_SNAKE_CASE:Union[str, Any] = generated.clamp(-1 , 1 )
    SCREAMING_SNAKE_CASE:Union[str, Any] = (generated - audio).abs().sum()
    SCREAMING_SNAKE_CASE:str = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path )
    print("Diff sum" , snake_case )
    print("Diff max" , snake_case )
    assert diff_max < 1e-3, F'''Diff max: {diff_max} is too much :-/'''
    print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
    # CLI: --model_path (official model name or local .ckpt), --save to keep
    # the converted pipeline, --checkpoint_path for the output directory.
    # NOTE(review): the parser/args are bound to `A_` but read as
    # `parser`/`args`, and `main` does not resolve (the entry point above is
    # named `A_`) -- renaming artifacts; verify.
    A_ = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    A_ = parser.parse_args()
    main(args)
| 139 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def A_ ( snake_case ):
    """Element-wise ReLU: clamp every entry of `snake_case` below at zero.

    Accepts anything NumPy can broadcast against a scalar (scalars, lists,
    ndarrays) and returns an ndarray of the same shape.
    """
    rectified = np.maximum(snake_case , 0 )
    return rectified
if __name__ == "__main__":
    # Demo: ReLU maps [-1, 0, 5] -> [0, 0, 5].
    # NOTE(review): `relu` does not resolve (the function above is named
    # `A_`) -- renaming artifact; verify.
    print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 139 | 1 |
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class lowerCamelCase__ :
    """Test helper that builds a tiny EsmFold configuration plus random
    inputs and shape-checks EsmForProteinFolding.

    NOTE(review): `__init__` repeats the parameter name `A` many times
    (invalid Python) and every value is bound to `UpperCAmelCase`, while the
    methods read `self.batch_size`, `self.hidden_size`, ... -- renaming
    artifact; verify against the upstream EsmFoldModelTester. All four
    methods also share the name `_UpperCamelCase`, so later defs shadow
    earlier ones.
    """

    def __init__( self ,A ,A=13 ,A=7 ,A=False ,A=True ,A=False ,A=False ,A=19 ,A=32 ,A=5 ,A=4 ,A=37 ,A="gelu" ,A=0.1 ,A=0.1 ,A=512 ,A=16 ,A=2 ,A=0.02 ,A=3 ,A=4 ,A=None ,):
        UpperCAmelCase = parent
        UpperCAmelCase = batch_size
        UpperCAmelCase = seq_length
        UpperCAmelCase = is_training
        UpperCAmelCase = use_input_mask
        UpperCAmelCase = use_token_type_ids
        UpperCAmelCase = use_labels
        UpperCAmelCase = vocab_size
        UpperCAmelCase = hidden_size
        UpperCAmelCase = num_hidden_layers
        UpperCAmelCase = num_attention_heads
        UpperCAmelCase = intermediate_size
        UpperCAmelCase = hidden_act
        UpperCAmelCase = hidden_dropout_prob
        UpperCAmelCase = attention_probs_dropout_prob
        UpperCAmelCase = max_position_embeddings
        UpperCAmelCase = type_vocab_size
        UpperCAmelCase = type_sequence_label_size
        UpperCAmelCase = initializer_range
        UpperCAmelCase = num_labels
        UpperCAmelCase = num_choices
        UpperCAmelCase = scope

    def _UpperCamelCase ( self ):
        """Create random input ids, optional mask/labels, and a config."""
        UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        UpperCAmelCase = None
        if self.use_input_mask:
            UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCAmelCase = None
        UpperCAmelCase = None
        UpperCAmelCase = None
        if self.use_labels:
            UpperCAmelCase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            UpperCAmelCase = ids_tensor([self.batch_size] ,self.num_choices )
        UpperCAmelCase = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def _UpperCamelCase ( self ):
        """Build a small folding-model EsmConfig (2 trunk blocks, fp32 ESM)."""
        UpperCAmelCase = EsmConfig(
            vocab_size=33 ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,is_folding_model=A ,esmfold_config={"""trunk""": {"""num_blocks""": 2}, """fp16_esm""": False} ,)
        return config

    def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ):
        """Run the folding model and check `positions`/`angles` output shapes."""
        UpperCAmelCase = EsmForProteinFolding(config=A ).float()
        model.to(A )
        model.eval()
        UpperCAmelCase = model(A ,attention_mask=A )
        UpperCAmelCase = model(A )
        UpperCAmelCase = model(A )
        self.parent.assertEqual(result.positions.shape ,(8, self.batch_size, self.seq_length, 14, 3) )
        self.parent.assertEqual(result.angles.shape ,(8, self.batch_size, self.seq_length, 7, 2) )

    def _UpperCamelCase ( self ):
        """Split prepared inputs into (config, inputs_dict) for common tests."""
        UpperCAmelCase = self.prepare_config_and_inputs()
        (
            (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) ,
        ) = config_and_inputs
        UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class lowerCamelCase__ ( snake_case , snake_case , unittest.TestCase ):
    """Common model tests for EsmForProteinFolding. ESMFold diverges from the
    standard model API in many ways, so most shared tests are skipped below.

    NOTE(review): all class attributes are bound to the same
    `SCREAMING_SNAKE_CASE` name and every method is named `_UpperCamelCase`
    (later defs shadow earlier ones); `EsmFoldModelTester` and `A` in setUp
    do not resolve here -- renaming artifacts; verify.
    """
    SCREAMING_SNAKE_CASE = False
    SCREAMING_SNAKE_CASE = (EsmForProteinFolding,) if is_torch_available() else ()
    SCREAMING_SNAKE_CASE = ()
    SCREAMING_SNAKE_CASE = {} if is_torch_available() else {}
    SCREAMING_SNAKE_CASE = False

    def _UpperCamelCase ( self ):
        """Set up the model tester and the config tester."""
        UpperCAmelCase = EsmFoldModelTester(self )
        UpperCAmelCase = ConfigTester(self ,config_class=A ,hidden_size=37 )

    def _UpperCamelCase ( self ):
        """Run the shared configuration tests."""
        self.config_tester.run_common_tests()

    def _UpperCamelCase ( self ):
        """Build inputs and run the model shape checks."""
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A )

    # Everything below is a shared test that does not apply to ESMFold.
    @unittest.skip("""Does not support attention outputs""" )
    def _UpperCamelCase ( self ):
        pass
    @unittest.skip
    def _UpperCamelCase ( self ):
        pass
    @unittest.skip("""Esm does not support embedding resizing""" )
    def _UpperCamelCase ( self ):
        pass
    @unittest.skip("""Esm does not support embedding resizing""" )
    def _UpperCamelCase ( self ):
        pass
    @unittest.skip("""ESMFold does not support passing input embeds!""" )
    def _UpperCamelCase ( self ):
        pass
    @unittest.skip("""ESMFold does not support head pruning.""" )
    def _UpperCamelCase ( self ):
        pass
    @unittest.skip("""ESMFold does not support head pruning.""" )
    def _UpperCamelCase ( self ):
        pass
    @unittest.skip("""ESMFold does not support head pruning.""" )
    def _UpperCamelCase ( self ):
        pass
    @unittest.skip("""ESMFold does not support head pruning.""" )
    def _UpperCamelCase ( self ):
        pass
    @unittest.skip("""ESMFold does not support head pruning.""" )
    def _UpperCamelCase ( self ):
        pass
    @unittest.skip("""ESMFold does not output hidden states in the normal way.""" )
    def _UpperCamelCase ( self ):
        pass
    @unittest.skip("""ESMfold does not output hidden states in the normal way.""" )
    def _UpperCamelCase ( self ):
        pass
    @unittest.skip("""ESMFold only has one output format.""" )
    def _UpperCamelCase ( self ):
        pass
    @unittest.skip("""This test doesn't work for ESMFold and doesn't test core functionality""" )
    def _UpperCamelCase ( self ):
        pass
    @unittest.skip("""ESMFold does not support input chunking.""" )
    def _UpperCamelCase ( self ):
        pass
    @unittest.skip("""ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.""" )
    def _UpperCamelCase ( self ):
        pass
    @unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
    def _UpperCamelCase ( self ):
        pass
    @unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
    def _UpperCamelCase ( self ):
        pass
    @unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
    def _UpperCamelCase ( self ):
        pass
    @unittest.skip("""ESMFold doesn't support data parallel.""" )
    def _UpperCamelCase ( self ):
        pass
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def _UpperCamelCase ( self ):
        pass
@require_torch
class lowerCamelCase__ ( snake_case ):
    """Slow integration test: run the pretrained facebook/esmfold_v1 model on
    a short token sequence and check the first predicted atom position.

    NOTE(review): outputs are bound to `UpperCAmelCase` while later lines
    read `model` / `position_outputs` / `A`, and `torch.floataa` does not
    exist (presumably float64) -- renaming artifacts; verify.
    """

    @slow
    def _UpperCamelCase ( self ):
        UpperCAmelCase = EsmForProteinFolding.from_pretrained("""facebook/esmfold_v1""" ).float()
        model.eval()
        UpperCAmelCase = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        UpperCAmelCase = model(A )["""positions"""]
        UpperCAmelCase = torch.tensor([2.5828, 0.7993, -10.9334] ,dtype=torch.floataa )
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] ,A ,atol=1e-4 ) )
| 234 |
"""simple docstring"""
from __future__ import annotations
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance < 0:
raise ValueError("""Resistance cannot be negative""" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 234 | 1 |
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class __magic_name__ ( lowerCamelCase__ ):
    """Distribution pushed through an element-wise affine map
    ``y = loc + scale * x``, exposing mean / variance / stddev of the
    transformed variable in closed form.

    NOTE(review): `__init__` repeats the parameter name `snake_case`
    (invalid Python), binds defaults to throwaway `A_` names, and reads
    undefined `scale` / `loc` -- renaming artifacts; verify against the
    upstream AffineTransformed implementation.
    """

    def __init__( self :Union[str, Any] , snake_case :Distribution , snake_case :int=None , snake_case :str=None , snake_case :Dict=0 ):
        '''Wrap a base distribution; scale defaults to 1.0 and loc to 0.0.'''
        A_ : Dict = 1.0 if scale is None else scale
        A_ : List[str] = 0.0 if loc is None else loc
        super().__init__(snake_case , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=snake_case )] )

    @property
    def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
        '''Mean of the affinely transformed distribution.'''
        return self.base_dist.mean * self.scale + self.loc

    @property
    def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
        '''Variance: the base variance scaled by scale**2.'''
        return self.base_dist.variance * self.scale**2

    @property
    def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
        '''Standard deviation (square root of the variance).'''
        return self.variance.sqrt()
class __magic_name__ ( nn.Module ):
    """Projects a hidden vector to one tensor per distribution argument
    (one nn.Linear per entry of `args_dim`), then constrains the results
    with `domain_map`.

    NOTE(review): `__init__` repeats the parameter name `snake_case`
    (invalid Python) and binds to `A_` throwaways while `forward` reads
    `self.proj` / `self.domain_map` -- renaming artifacts; verify.
    """

    def __init__( self :Optional[int] , snake_case :int , snake_case :Dict[str, int] , snake_case :Callable[..., Tuple[torch.Tensor]] , **snake_case :int ):
        '''Store args_dim, build one Linear per argument, keep domain_map.'''
        super().__init__(**snake_case )
        A_ : int = args_dim
        A_ : List[Any] = nn.ModuleList([nn.Linear(snake_case , snake_case ) for dim in args_dim.values()] )
        A_ : Optional[Any] = domain_map

    def SCREAMING_SNAKE_CASE ( self :str , snake_case :torch.Tensor ):
        '''Apply every projection and map the raw outputs into the domain.'''
        A_ : Tuple = [proj(snake_case ) for proj in self.proj]
        return self.domain_map(*snake_case )
class __magic_name__ ( nn.Module ):
    """nn.Module wrapper around a plain callable, so a function can take
    part in a module pipeline.

    NOTE(review): `__init__` binds to `A_` but reads undefined `function`,
    and `forward` repeats the parameter name `snake_case` (invalid Python)
    -- renaming artifacts; verify.
    """

    def __init__( self :Optional[Any] , snake_case :Optional[Any] ):
        '''Store the callable to be invoked by forward.'''
        super().__init__()
        A_ : Optional[Any] = function

    def SCREAMING_SNAKE_CASE ( self :Any , snake_case :List[str] , *snake_case :str ):
        '''Invoke the stored callable on the given tensors.'''
        return self.function(snake_case , *snake_case )
class __magic_name__ :
    """Base class for distribution output heads: owns the per-argument
    dimensionality table, builds (optionally affine-rescaled) distributions
    from projected parameters, and produces the projection module.

    NOTE(review): the three 42 placeholders below all bind the same
    `__UpperCamelCase` name (originally distinct fields such as args_dim and
    distribution_class); several methods read identifiers (`dim`, `distr`,
    `x`) their `snake_case` parameters never bind, and the helper classes
    referenced (ParameterProjection / LambdaLayer / AffineTransformed) do
    not resolve under those names here -- renaming artifacts; verify.
    """
    # Placeholder class fields -- see NOTE above.
    __UpperCamelCase = 42
    __UpperCamelCase = 42
    __UpperCamelCase = 42

    def __init__( self :Dict , snake_case :int = 1 ):
        '''Store the event dimension and scale args_dim accordingly.'''
        A_ : Union[str, Any] = dim
        A_ : Tuple = {k: dim * self.args_dim[k] for k in self.args_dim}

    def SCREAMING_SNAKE_CASE ( self :int , snake_case :Optional[Any] ):
        '''Build the base distribution; wrap in Independent when dim > 1.'''
        if self.dim == 1:
            return self.distribution_class(*snake_case )
        else:
            return Independent(self.distribution_class(*snake_case ) , 1 )

    def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :List[Any] , snake_case :Optional[torch.Tensor] = None , snake_case :Optional[torch.Tensor] = None , ):
        '''Build the distribution; affine-transform it when loc/scale given.'''
        A_ : Optional[int] = self._base_distribution(snake_case )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(snake_case , loc=snake_case , scale=snake_case , event_dim=self.event_dim )

    @property
    def SCREAMING_SNAKE_CASE ( self :str ):
        '''Event shape: scalar () for dim == 1, else (dim,).'''
        return () if self.dim == 1 else (self.dim,)

    @property
    def SCREAMING_SNAKE_CASE ( self :Tuple ):
        '''Number of event dimensions.'''
        return len(self.event_shape )

    @property
    def SCREAMING_SNAKE_CASE ( self :str ):
        '''Default lower bound of the support.'''
        return 0.0

    def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :int ):
        '''Create the parameter-projection module for the given input size.'''
        return ParameterProjection(
            in_features=snake_case , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )

    def SCREAMING_SNAKE_CASE ( self :Tuple , *snake_case :torch.Tensor ):
        '''Map raw projected tensors into the distribution domain
        (implemented by subclasses).'''
        raise NotImplementedError()

    @staticmethod
    def SCREAMING_SNAKE_CASE ( snake_case :torch.Tensor ):
        '''"Squareplus" positivity map: (x + sqrt(x**2 + 4)) / 2.'''
        return (x + torch.sqrt(torch.square(snake_case ) + 4.0 )) / 2.0
class __magic_name__ ( lowerCamelCase__ ):
    """Student-T output head: projects to (df, loc, scale); scale is made
    positive via squareplus (clamped at float eps) and df shifted above 2.

    NOTE(review): the classmethod repeats the parameter name `snake_case`
    (invalid Python) and reads undefined `scale` / `df` / `loc` -- renaming
    artifacts; verify.
    """
    __UpperCamelCase = {"df": 1, "loc": 1, "scale": 1}
    __UpperCamelCase = StudentT

    @classmethod
    def SCREAMING_SNAKE_CASE ( cls :Tuple , snake_case :torch.Tensor , snake_case :torch.Tensor , snake_case :torch.Tensor ):
        '''Constrain raw (df, loc, scale) tensors to the StudentT domain and
        squeeze the trailing argument axis.'''
        A_ : Dict = cls.squareplus(snake_case ).clamp_min(torch.finfo(scale.dtype ).eps )
        A_ : Optional[int] = 2.0 + cls.squareplus(snake_case )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class __magic_name__ ( lowerCamelCase__ ):
    """Normal output head: projects to (loc, scale); scale is made positive
    via squareplus, clamped at float eps.

    NOTE(review): the classmethod repeats the parameter name `snake_case`
    (invalid Python) and reads undefined `scale` / `loc` -- renaming
    artifacts; verify.
    """
    __UpperCamelCase = {"loc": 1, "scale": 1}
    __UpperCamelCase = Normal

    @classmethod
    def SCREAMING_SNAKE_CASE ( cls :Any , snake_case :torch.Tensor , snake_case :torch.Tensor ):
        '''Constrain raw (loc, scale) tensors to the Normal domain and
        squeeze the trailing argument axis.'''
        A_ : Tuple = cls.squareplus(snake_case ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class __magic_name__ ( lowerCamelCase__ ):
    """Negative-binomial output head (total_count, logits). A `scale` cannot
    be applied as an affine transform here, so it is folded into the logits
    before construction.

    NOTE(review): parameters are named `snake_case` (repeated -- invalid
    Python) while bodies read `total_count` / `logits` / `distr_args` /
    `scale` -- renaming artifacts; verify.
    """
    __UpperCamelCase = {"total_count": 1, "logits": 1}
    __UpperCamelCase = NegativeBinomial

    @classmethod
    def SCREAMING_SNAKE_CASE ( cls :Optional[Any] , snake_case :torch.Tensor , snake_case :torch.Tensor ):
        '''Constrain raw (total_count, logits): total_count made positive
        via squareplus; squeeze the trailing argument axis.'''
        A_ : Any = cls.squareplus(snake_case )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )

    def SCREAMING_SNAKE_CASE ( self :int , snake_case :Union[str, Any] ):
        '''Build NegativeBinomial; wrap in Independent when dim > 1.'''
        A_ , A_ : Any = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=snake_case , logits=snake_case )
        else:
            return Independent(self.distribution_class(total_count=snake_case , logits=snake_case ) , 1 )

    def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :int , snake_case :Optional[torch.Tensor] = None , snake_case :Optional[torch.Tensor] = None ):
        '''Build the output distribution, folding `scale` into the logits.'''
        A_ , A_ : Optional[Any] = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
| 300 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
# FIX (review): all three module docstring constants were assigned to the
# single obfuscated name ``_lowerCAmelCase`` (each assignment shadowing the
# previous one), while the metric class below reads ``_CITATION`` /
# ``_DESCRIPTION`` / ``_KWARGS_DESCRIPTION``.  Restore the names the class
# actually uses; string contents are unchanged.
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''

_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''

_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
...     "what about this sentence?",
...     "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
...     ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
...     ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
...     references=references,
...     case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
...     "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
...     ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
...     references=references,
...     case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
...     "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
...     ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
...     references=references,
...     normalized=True,
...     case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
...     "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
...     ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
...     references=references,
...     ignore_punct=True,
...     case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
...     "what about this sentence?",
...     "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
...     ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
...     ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
...     references=references,
...     ignore_punct=True,
...     case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __magic_name__(datasets.Metric):
    """TER (Translation Edit Rate) metric backed by sacrebleu.

    FIXES (review): both methods shared one obfuscated name (the second
    shadowed the first) and the compute hook declared four duplicate
    ``snake_case`` parameters (a SyntaxError) while its body read the
    undefined names ``predictions``/``references``.  The ``datasets.Metric``
    API requires the hooks to be named ``_info`` and ``_compute``; parameter
    names are restored from the kwargs-description above.
    """

    def _info(self):
        """Check the sacrebleu version and declare metric metadata/features."""
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                "You can install it with `pip install \"sacrebleu>=1.4.12\"`."
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        """Compute TER over the corpus.

        Returns a dict with the TER score, the cumulative number of edits and
        the cumulative reference length.

        Raises:
            ValueError: if predictions do not all have the same number of references.
        """
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        # sacrebleu expects one stream per reference position, so transpose.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 300 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# FIX (review): the import-structure dict, the torch-only symbol list and the
# final lazy-module object were all assigned to the single obfuscated name
# ``UpperCamelCase__`` (each assignment clobbering the previous one) while
# ``_import_structure`` -- read on the last line -- was never defined, and the
# lazy module was never installed into ``sys.modules``.  Restored to the
# standard transformers lazy-init layout.
_import_structure = {
    'configuration_upernet': ['UperNetConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Model classes are only importable when torch is installed.
    _import_structure['modeling_upernet'] = [
        'UperNetForSemanticSegmentation',
        'UperNetPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 299 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
# Module-level logger, namespaced to this file's import path.
UpperCamelCase__ = logging.get_logger(__name__)
# FIX (review): all three constants were assigned to the single obfuscated
# name ``UpperCamelCase__`` (each assignment shadowing the previous one),
# while the tokenizer class below reads ``VOCAB_FILES_NAMES``,
# ``PRETRAINED_VOCAB_FILES_MAP`` and ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``.
# Restore the names the class actually uses; contents are unchanged.
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
    },
    'merges_file': {
        'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
    },
    'tokenizer_config_file': {
        'facebook/blenderbot_small-90M': (
            'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/blenderbot_small-90M': 512,
}
class lowerCamelCase_(__a):
    """Fast BlenderbotSmall tokenizer backed by a byte-level BPE model.

    FIXES (review): ``__init__`` and both helper methods declared several
    parameters with the same obfuscated name (duplicate arguments are a
    SyntaxError), the four class attributes all shadowed one name, and the
    two helpers shadowed each other.  Attribute/method/parameter names are
    restored from the ``PreTrainedTokenizerFast`` API the class plugs into
    -- confirm against the upstream transformers file.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        """Wrap a ByteLevelBPETokenizer built from the given vocab/merges files."""
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add BOS/EOS around one sequence, or BOS..EOS..EOS for a pair."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """BlenderbotSmall does not use token type ids: return all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 299 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Batch-size constants for this example script.
# FIX (review): both values were bound to the same obfuscated name
# ``UpperCAmelCase`` (the second assignment shadowed the first); names
# restored from the upstream accelerate example -- confirm against upstream.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build the GLUE/MRPC train and eval dataloaders for this example.

    FIXES (review): every local was bound to the single obfuscated name
    ``lowerCamelCase`` while the body read the real names (``tokenizer``,
    ``datasets``, ``tokenized_datasets``, ...), and the function is invoked
    below as ``get_dataloaders``.  Boolean arguments to ``map``/``DataLoader``
    were also mangled; restored values follow the upstream accelerate example
    -- confirm against upstream.

    Args:
        accelerator: the ``Accelerator``; its main process tokenizes first.
        batch_size: per-device batch size for both dataloaders.

    Returns:
        ``(train_dataloader, eval_dataloader)``.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
# For testing only: swap in lightweight mocked dataloaders when the test
# harness sets TESTING_MOCKED_DATALOADERS=1.
# FIX (review): the rebinding target was mangled to ``UpperCAmelCase``; the
# ``# noqa: F811`` (redefinition) marker shows it redefines ``get_dataloaders``.
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Train and evaluate BERT on GLUE/MRPC, synchronizing with LocalSGD.

    FIXES (review): every local was bound to the single obfuscated name
    ``lowerCamelCase`` while the body read the real names, the mocked-test
    branch compared against the undefined ``__snake_case``, and the function
    is invoked below as ``training_function``.  Restored per the upstream
    accelerate LocalSGD example -- confirm against upstream.

    Args:
        config: dict with "lr", "num_epochs", "seed" and "batch_size".
        args: parsed CLI namespace produced by ``main()``.
    """
    # For testing only: shrink the run when mocked dataloaders are active.
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:', eval_metric)
def main():
    """Parse CLI arguments and launch training with default hyper-parameters.

    FIXES (review): locals were mangled to ``lowerCamelCase`` and the final
    call passed the undefined ``__snake_case``; the function is invoked
    below as ``main``.
    """
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
# Standard script entry point.
# FIX (review): the guarded call had lost its indentation in this dump.
if __name__ == "__main__":
    main()
| 252 |
def lowercase__(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative ints as a binary string.

    The result keeps the "0b" prefix and is zero-padded to the width of the
    wider operand, e.g. ``lowercase__(25, 32) == "0b000000"``.

    Raises:
        ValueError: if either input is negative.

    FIX (review): the original declared both parameters as ``__snake_case``
    (duplicate argument name -> SyntaxError) while the body read ``a``/``b``.
    """
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    # Per-character AND over the zero-padded binary representations.
    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1'))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
# Run the module's doctests when executed as a script.
# FIX (review): the guarded body had lost its indentation in this dump.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 29 | 0 |
'''simple docstring'''
def find_minimum_change(denominations, value) -> list[int]:
    """Greedy change-making: repeatedly take the largest denomination that fits.

    Correct for canonical coin systems (e.g. the Indian currency set used by
    the driver below); may be suboptimal for arbitrary denomination sets.

    Args:
        denominations: denominations in ascending order (iterated in reverse).
        value: the amount to change; anything ``int()`` accepts.

    Returns:
        The denominations used, largest first.

    FIXES (review): the original declared both parameters as ``__snake_case``
    (duplicate argument name -> SyntaxError), read undefined names in the
    body, and was itself named ``__lowerCamelCase`` although the driver
    below calls ``find_minimum_change``.
    """
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Take as many coins of this denomination as still fit.
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array
    return answer
# Driver Code
if __name__ == "__main__":
    # FIXES (review): every name below was assigned to the obfuscated
    # ``__snake_case`` while the code read ``denominations``/``value``/
    # ``n``/``answer``; block indentation was also lost in this dump.
    denominations = []
    value = '0'

    if (
        input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
        == "y"
    ):
        n = int(input('Enter the number of denominations you want to add: ').strip())

        for i in range(0, n):
            denominations.append(int(input(f"""Denomination {i}: """).strip()))
        value = input('Enter the change you want to make in Indian Currency: ').strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input('Enter the change you want to make: ').strip()

    if int(value) == 0 or int(value) < 0:
        print('The total value cannot be zero or negative.')
    else:
        print(f"""Following is minimal change for {value}: """)
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=' ')
| 356 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the shared SentencePiece vocab fixture used by the tokenizer tests.
# NOTE(review): upstream names this constant SAMPLE_VOCAB; the test methods
# below reference an undefined ``lowerCAmelCase_`` where this path (and other
# literal arguments) belong -- confirm against the original test file.
__snake_case : List[str] = get_tests_dir('fixtures/test_sentencepiece.model')
# Test suite for the XLM-RoBERTa tokenizer (slow SentencePiece + fast Rust).
# NOTE(review): throughout this class the obfuscation replaced literal
# arguments and local names with the placeholder ``lowerCAmelCase_``, which
# is never defined -- every method that evaluates it will raise NameError at
# runtime.  The original literals (e.g. the fixture path, keep_accents=True)
# must be restored from the upstream transformers test file; only comments
# are added here.
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = XLMRobertaTokenizer
__snake_case = XLMRobertaTokenizerFast
__snake_case = True
__snake_case = True
# Build a tokenizer from the SentencePiece fixture and persist it for the
# shared TokenizerTesterMixin harness.
def lowercase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
A__ : Any =XLMRobertaTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
# <pad> should round-trip between token string and id 1.
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
A__ : Union[str, Any] ="""<pad>"""
A__ : Any =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_ ) , lowerCAmelCase_ )
# First/last vocab entries and total vocab size of the fixture tokenizer.
def lowercase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
A__ : int =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(lowerCAmelCase_ ) , 10_02 )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
# Tokenization and id conversion on sample sentences; characters missing
# from the fixture vocab must map to <unk> (id 3 after the fairseq offset).
def lowercase__ ( self : Tuple ) -> Any:
'''simple docstring'''
A__ : List[Any] =XLMRobertaTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ )
A__ : Tuple =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A__ : Optional[int] =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A__ : Optional[int] =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
A__ : Union[str, Any] =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
# Fast and slow tokenizers must produce load-compatible files in every
# save format (default, legacy_format=True, legacy_format=False).
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
A__ : Dict =(self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
A__ : List[str] =self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
A__ : Union[str, Any] =self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
A__ : Optional[Any] =tempfile.mkdtemp()
A__ : Union[str, Any] =tokenizer_r.save_pretrained(lowerCAmelCase_ )
A__ : Union[str, Any] =tokenizer_p.save_pretrained(lowerCAmelCase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
A__ : List[str] =tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Checks everything loads correctly in the same way
A__ : Any =tokenizer_r.from_pretrained(lowerCAmelCase_ )
A__ : Union[str, Any] =tokenizer_p.from_pretrained(lowerCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCAmelCase_ )
# Save tokenizer rust, legacy_format=True
A__ : List[str] =tempfile.mkdtemp()
A__ : List[str] =tokenizer_r.save_pretrained(lowerCAmelCase_ , legacy_format=lowerCAmelCase_ )
A__ : List[Any] =tokenizer_p.save_pretrained(lowerCAmelCase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Checks everything loads correctly in the same way
A__ : str =tokenizer_r.from_pretrained(lowerCAmelCase_ )
A__ : List[Any] =tokenizer_p.from_pretrained(lowerCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) )
shutil.rmtree(lowerCAmelCase_ )
# Save tokenizer rust, legacy_format=False
A__ : List[str] =tempfile.mkdtemp()
A__ : Dict =tokenizer_r.save_pretrained(lowerCAmelCase_ , legacy_format=lowerCAmelCase_ )
A__ : List[Any] =tokenizer_p.save_pretrained(lowerCAmelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
A__ : Optional[int] =tokenizer_r.from_pretrained(lowerCAmelCase_ )
A__ : str =tokenizer_p.from_pretrained(lowerCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) )
shutil.rmtree(lowerCAmelCase_ )
# Full pretrained tokenizer used by the slow integration tests below;
# cached so it is only downloaded/loaded once per test class.
@cached_property
def lowercase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" )
# The tokenizer must survive a pickle round-trip.
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase_ , f.name )
A__ : Dict =XLMRobertaTokenizer(f.name , keep_accents=lowerCAmelCase_ )
A__ : Optional[Any] =pickle.dumps(lowerCAmelCase_ )
pickle.loads(lowerCAmelCase_ )
# Slow (Python) and fast (Rust) tokenizers must agree on tokens and ids.
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
A__ : Any =self.get_tokenizer()
A__ : Any =self.get_rust_tokenizer()
A__ : Optional[Any] ="""I was born in 92000, and this is falsé."""
A__ : List[str] =tokenizer.tokenize(lowerCAmelCase_ )
A__ : int =rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : str =tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
A__ : Dict =rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Union[str, Any] =self.get_rust_tokenizer()
A__ : Union[str, Any] =tokenizer.encode(lowerCAmelCase_ )
A__ : Optional[Any] =rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Integration: expected ids for a short sentence (reference values come
# from the fairseq xlmr.base encoder, see the commented torch.hub lines).
@slow
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
A__ : Optional[Any] ="""Hello World!"""
A__ : Optional[Any] =[0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(lowerCAmelCase_ , self.big_tokenizer.encode(lowerCAmelCase_ ) )
# Integration: expected ids for a long sentence containing characters the
# vocab does not cover (note the <unk> handling difference vs fairseq).
@slow
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
A__ : List[Any] =(
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
A__ : Optional[Any] =[
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(lowerCAmelCase_ , self.big_tokenizer.encode(lowerCAmelCase_ ) )
# Full integration check against a pinned model revision; the expected
# input_ids/attention_mask blob below is intentionally kept verbatim.
@slow
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
# fmt: off
A__ : List[Any] ={"""input_ids""": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
| 136 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
# Map from model identifier to the hosted config file for CTRL.
UpperCAmelCase_ : Optional[Any] = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class _SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    """Configuration class for the CTRL model.

    Stores the hyper-parameters needed to instantiate a CTRL model.
    Inherits from ``PretrainedConfig`` (imported at the top of this file),
    which handles serialization and the options passed through ``**kwargs``.
    The original (obfuscated) version used an undefined base class, undefined
    parameter names, and assigned every value to a throwaway local instead of
    the instance.
    """

    # Identifier used by the auto-config machinery.
    model_type = "ctrl"
    # Cached keys ignored when inspecting inference outputs.
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map common config attribute names onto CTRL's historical names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246_534,       # size of the token vocabulary
        n_positions=256,          # maximum sequence length
        n_embd=1_280,             # embedding / hidden dimension
        dff=8_192,                # feed-forward inner dimension
        n_layer=48,               # number of transformer layers
        n_head=16,                # number of attention heads
        resid_pdrop=0.1,          # residual dropout probability
        embd_pdrop=0.1,           # embedding dropout probability
        layer_norm_epsilon=1e-6,  # epsilon used by LayerNorm
        initializer_range=0.02,   # stddev for weight initialization
        use_cache=True,           # whether to return past key/values
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
| 38 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class _SCREAMING_SNAKE_CASE ( TaskTemplate ):
    """Task template describing automatic-speech-recognition datasets.

    Maps a dataset's audio column onto the task's "audio" input and its
    transcription column onto the "transcription" label. The obfuscated
    original named every dataclass field ``snake_case__`` (so only the last
    survived) and overwrote all method locals with one name.
    """

    # Task identifier; kept in asdict() output even though it is the default.
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def _A(self, features):
        """Return a copy of this template whose input schema uses the dataset's
        actual ``Audio`` feature for the audio column.

        Raises ``ValueError`` if the audio column is missing or not an Audio
        feature.
        """
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        # The dataclass is frozen, so mutate the copy through __dict__.
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self):
        """Map dataset column names onto the template's canonical names."""
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 38 | 1 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def __snake_case ( _lowerCAmelCase : dict , _lowerCAmelCase : str , _lowerCAmelCase : set , _lowerCAmelCase : set , _lowerCAmelCase : dict , _lowerCAmelCase : dict , _lowerCAmelCase : PriorityQueue , _lowerCAmelCase : dict , _lowerCAmelCase : float | int , ) -> float | int:
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
A_ : Optional[int] = cst_fwd.get(UpperCamelCase__ , np.inf )
A_ : Any = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
A_ : Optional[int] = new_cost_f
A_ : str = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
A_ : Any = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : dict , _lowerCAmelCase : dict ) -> int:
A_ : str = -1
A_ : Dict = set()
A_ : Union[str, Any] = set()
A_ : Optional[int] = {source: 0}
A_ : List[Any] = {destination: 0}
A_ : Union[str, Any] = {source: None}
A_ : List[str] = {destination: None}
A_ : int = PriorityQueue()
A_ : List[Any] = PriorityQueue()
A_ : Optional[Any] = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
A_ , A_ : Optional[int] = queue_forward.get()
visited_forward.add(UpperCamelCase__ )
A_ , A_ : Tuple = queue_backward.get()
visited_backward.add(UpperCamelCase__ )
A_ : str = pass_and_relaxation(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
A_ : str = pass_and_relaxation(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
A_ : List[Any] = shortest_distance
return shortest_path_distance
# Example forward graph: adjacency lists mapping node -> [[neighbor, weight], ...].
_lowerCAmelCase : Dict = {
    'B': [['C', 1]],
    'C': [['D', 1]],
    'D': [['F', 1]],
    'E': [['B', 1], ['G', 2]],
    'F': [],
    'G': [['F', 1]],
}
# Matching backward graph (incoming edges) for the bidirectional search.
# NOTE(review): 'E': [[None, np.inf]] looks like a sentinel for "no incoming
# edges" — confirm this is intentional rather than an empty list.
_lowerCAmelCase : Dict = {
    'B': [['E', 1]],
    'C': [['B', 1]],
    'D': [['C', 1]],
    'F': [['D', 1], ['G', 1]],
    'E': [[None, np.inf]],
    'G': [['E', 2]],
}
if __name__ == "__main__":
    # Run any doctests embedded in this module.
    import doctest
    doctest.testmod()
| 367 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure: maps submodule name -> public names it defines.
# The obfuscated original assigned this to a throwaway name while the
# _LazyModule call below references `_import_structure`.
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports names on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 70 | 0 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
    from PIL import Image
else:
    # Pillow is not installed: define a no-op stand-in so module-level
    # references below do not fail at import time.
    class __A :
        @staticmethod
        def __A ( *a__ , **a__ ):
            # Accept any arguments and do nothing.
            pass
def hashimage(image) -> str:
    """Return a short fingerprint (first 10 hex chars of the MD5 digest) of an
    image's raw bytes.

    *image* must expose ``tobytes()`` (PIL Image or numpy array). MD5 is used
    only for fingerprinting, not for anything security-sensitive. The original
    called the non-existent ``hashlib.mda``; this name matches the call sites
    in ``mask_to_test_readable`` and the test class below.
    """
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask) -> Dict:
    """Summarize a mask as a small comparable dict: content hash + array shape."""
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __A ( unittest.TestCase ):
    """Pipeline tests for the mask-generation (SAM) pipeline."""

    # Mappings consumed by the pipeline-test mixin; empty when the backend
    # model mappings are unavailable.
    # NOTE(review): attribute names restored from the upstream test file —
    # the obfuscated source assigned both to the same name.
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a MaskGenerationPipeline plus sample image paths for the mixin."""
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        # Generic per-model pipeline checks are not applicable here.
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        """End-to-end SAM run; masks are compared via hash to keep output small."""
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.021},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
                {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9967},
                {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.993},
                {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9909},
                {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9879},
                {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9834},
                {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9716},
                {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9612},
                {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9599},
                {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9552},
                {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9532},
                {"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9516},
                {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9499},
                {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9483},
                {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9464},
                {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9408},
                {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9335},
                {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9326},
                {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9262},
                {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8999},
                {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8986},
                {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8984},
                {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8873},
                {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8871},
            ],
        )
        # fmt: on

    @require_torch
    @slow
    def test_threshold(self):
        """A pred_iou_thresh of 1 keeps only the highest-confidence masks."""
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
            ],
        )
| 44 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
def __init__(self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=4 , ):
A_ : List[Any] = parent
A_ : str = batch_size
A_ : List[Any] = seq_length
A_ : Dict = is_training
A_ : List[Any] = use_attention_mask
A_ : Any = use_token_type_ids
A_ : Optional[int] = use_labels
A_ : Tuple = vocab_size
A_ : List[str] = hidden_size
A_ : List[str] = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : int = intermediate_size
A_ : Optional[Any] = hidden_act
A_ : List[Any] = hidden_dropout_prob
A_ : Optional[Any] = attention_probs_dropout_prob
A_ : Any = max_position_embeddings
A_ : Union[str, Any] = type_vocab_size
A_ : int = type_sequence_label_size
A_ : Any = initializer_range
A_ : List[str] = num_choices
def _a (self ):
A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ : Any = None
if self.use_attention_mask:
A_ : Any = random_attention_mask([self.batch_size, self.seq_length] )
A_ : Union[str, Any] = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=lowercase , )
return config, input_ids, attention_mask
def _a (self ):
A_ : List[str] = self.prepare_config_and_inputs()
A_, A_, A_ : str = config_and_inputs
A_ : Any = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    # Model classes driven by the shared Flax tester mixin; empty tuple when
    # flax is unavailable.
    # NOTE(review): FlaxDistilBertForQuestionAnswering appears twice in this
    # tuple — looks like a copy/paste duplicate; confirm against upstream.
    __SCREAMING_SNAKE_CASE : Optional[Any] = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def _a (self ):
        # Set up the shared model tester fixture.
        # NOTE(review): `FlaxDistilBertModelTester` is not defined under that
        # name in this file — presumably the tester class above; verify.
        A_ : Tuple = FlaxDistilBertModelTester(self )

    @slow
    def _a (self ):
        # Smoke test: each model class loads pretrained weights and runs a
        # forward pass on a 1x1 input.
        for model_class_name in self.all_model_classes:
            A_ : Union[str, Any] = model_class_name.from_pretrained("""distilbert-base-uncased""" )
            # NOTE(review): the names `model` and `lowercase` used below are
            # not bound here in the obfuscated source (assignments bind `A_`);
            # verify the intended variable names against upstream.
            A_ : Any = model(np.ones((1, 1) ) )
            self.assertIsNotNone(lowercase )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
    """Integration test: forward pass of pretrained Flax DistilBert."""

    @slow
    def _a (self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        # Hidden states for one 11-token sequence.
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        # Reference slice captured from a known-good run; compared with a
        # loose tolerance to absorb numerical noise.
        expected_slice = np.array(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class SCREAMING_SNAKE_CASE__ ( SchedulerMixin , ConfigMixin ):
    """Variance-preserving SDE scheduler for score-based generative models.

    Implements the predictor step of the VP-SDE from Song et al., following
    https://github.com/yang-song/score_sde_pytorch. The obfuscated original
    used undefined base classes and collapsed all parameters/locals to one
    name; the names here are restored from the diffusers scheduler API.
    """

    # Solver order of the scheduler.
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2_000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        # Populated lazily by set_timesteps / the sampling loop.
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        """Create the continuous timestep schedule from 1 down to sampling_eps."""
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        """Run one predictor step; returns ``(new_sample, sample_mean)``."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score: divide by the marginal std of the VP-SDE.
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std
        # compute drift/diffusion of the reverse SDE for a negative time step dt
        dt = -1.0 / len(self.timesteps)
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise (Euler-Maruyama step of the reverse SDE)
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise
        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
| 120 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure: maps submodule name -> list of public names it defines.
# The obfuscated original assigned this (and each optional extension) to
# throwaway names while the _LazyModule call below references
# `_import_structure`.
_import_structure = {
    "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
    "feature_extraction_whisper": ["WhisperFeatureExtractor"],
    "processing_whisper": ["WhisperProcessor"],
    "tokenization_whisper": ["WhisperTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_whisper"] = [
        "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WhisperForConditionalGeneration",
        "WhisperModel",
        "WhisperPreTrainedModel",
        "WhisperForAudioClassification",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_whisper"] = [
        "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWhisperForConditionalGeneration",
        "TFWhisperModel",
        "TFWhisperPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_whisper"] = [
        "FlaxWhisperForConditionalGeneration",
        "FlaxWhisperModel",
        "FlaxWhisperPreTrainedModel",
        "FlaxWhisperForAudioClassification",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
    from .feature_extraction_whisper import WhisperFeatureExtractor
    from .processing_whisper import WhisperProcessor
    from .tokenization_whisper import WhisperTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_whisper_fast import WhisperTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_whisper import (
            WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            WhisperForAudioClassification,
            WhisperForConditionalGeneration,
            WhisperModel,
            WhisperPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_whisper import (
            TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWhisperForConditionalGeneration,
            TFWhisperModel,
            TFWhisperPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_whisper import (
            FlaxWhisperForAudioClassification,
            FlaxWhisperForConditionalGeneration,
            FlaxWhisperModel,
            FlaxWhisperPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that resolves names on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 120 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _a ( lowerCAmelCase_ ):
"""simple docstring"""
@staticmethod
@abstractmethod
def __A ( UpperCAmelCase : ArgumentParser ):
raise NotImplementedError()
@abstractmethod
def __A ( self : Optional[Any] ):
raise NotImplementedError() | 312 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# The slow (SentencePiece-based) tokenizer is optional; fall back to None so
# the fast tokenizer can still be used without sentencepiece installed.
if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    __SCREAMING_SNAKE_CASE :List[Any] = None
# Module logger.
__SCREAMING_SNAKE_CASE :Union[str, Any] = logging.get_logger(__name__)
# Filenames used when saving/loading the tokenizer.
__SCREAMING_SNAKE_CASE :List[str] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
# Download locations of the pretrained vocab/tokenizer files per checkpoint.
__SCREAMING_SNAKE_CASE :List[Any] = {
    '''vocab_file''': {
        '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
        '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
        '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
        '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
        '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
        '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
        '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
        '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
    },
    '''tokenizer_file''': {
        '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
        '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
        '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
        '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
        '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
        '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
        '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
        '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
    },
}
# Maximum input lengths (positional embedding sizes) per checkpoint.
__SCREAMING_SNAKE_CASE :Optional[Any] = {
    '''albert-base-v1''': 512,
    '''albert-large-v1''': 512,
    '''albert-xlarge-v1''': 512,
    '''albert-xxlarge-v1''': 512,
    '''albert-base-v2''': 512,
    '''albert-large-v2''': 512,
    '''albert-xlarge-v2''': 512,
    '''albert-xxlarge-v2''': 512,
}
# SentencePiece underline marker that denotes a word boundary.
__SCREAMING_SNAKE_CASE :Optional[int] = '''▁'''
class A_ ( PreTrainedTokenizerFast ):
    """Fast (tokenizers-backed) ALBERT tokenizer.

    The obfuscated original used an undefined base class, named every
    ``__init__`` parameter ``snake_case_`` (so the super() call and attribute
    assignments referenced a single meaningless name), and collapsed method
    locals. Names are restored from the standard fast-tokenizer interface.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Slow counterpart used for conversion when sentencepiece is available.
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space
        # before it and is included in the raw text, so there should be a
        # match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # Saving a slow tokenizer requires the original sentencepiece model.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return 0s for the first sequence (incl. specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the SentencePiece vocab file into *save_directory*.

        Returns a 1-tuple with the written path, or None when
        *save_directory* is not a directory.
        """
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 22 | 0 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase ( a__ , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[int] = RobertaTokenizer
A_ : Any = RobertaTokenizerFast
A_ : Dict = True
A_ : Tuple = {'cls_token': '<s>'}
def _UpperCAmelCase ( self ) -> Dict:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_a = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_a = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
_a = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_a = {'''unk_token''': '''<unk>'''}
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__UpperCAmelCase ) )
def _UpperCAmelCase ( self , **__UpperCAmelCase ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def _UpperCAmelCase ( self , **__UpperCAmelCase ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]:
_a = '''lower newer'''
_a = '''lower newer'''
return input_text, output_text
def _UpperCAmelCase ( self ) -> Tuple:
_a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
_a = '''lower newer'''
_a = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
_a = tokenizer.tokenize(__UpperCAmelCase ) # , add_prefix_space=True)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_a = tokens + [tokenizer.unk_token]
_a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__UpperCAmelCase ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__UpperCAmelCase ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def _UpperCAmelCase ( self ) -> Tuple:
_a = self.tokenizer_class.from_pretrained('''roberta-base''' )
_a = tokenizer.encode('''sequence builders''' , add_special_tokens=__UpperCAmelCase )
_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__UpperCAmelCase )
_a = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
_a = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
_a = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase )
_a = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase , __UpperCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _UpperCAmelCase ( self ) -> Union[str, Any]:
# Exercises `add_prefix_space` behaviour and the handling of spaces around
# special tokens (in particular a left-stripping <mask> token).
# NOTE(review): obfuscation damage throughout — every result is bound to `_a`
# while later lines read the intended names (`tokenizer`, `encoded`,
# `mask_loc`, ...), and `__UpperCAmelCase` (undefined) replaces both the text
# arguments and the original True/False flags. The original literals must be
# restored before this method can run; only the inline structure is documented here.
_a = self.get_tokenizer()
_a = '''Encode this sequence.'''
# byte_encoder maps the raw space byte (0x20) to its byte-level BPE symbol.
_a = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
_a = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
_a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase )
_a = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
_a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
_a = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
_a = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase )
# Testing spaces after special tokens
_a = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase )} ) # mask token has a left space
_a = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
_a = '''Encode <mask> sequence'''
_a = '''Encode <mask>sequence'''
_a = tokenizer.encode(__UpperCAmelCase )
_a = encoded.index(__UpperCAmelCase )
_a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
_a = tokenizer.encode(__UpperCAmelCase )
_a = encoded.index(__UpperCAmelCase )
_a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Any:
pass
def _UpperCAmelCase ( self ) -> Optional[int]:
# Compares the Rust and Python tokenizers on a sentence containing <mask>:
# token_type_ids, attention_mask and the decoded token sequences must agree.
# NOTE(review): obfuscation damage — every value is bound to `_a` while later
# lines read the intended names (`tokenizer_r`, `tokenizer_p`, `tokens_r`,
# `tokens_p`), and `__UpperCAmelCase` (undefined) replaces the original
# arguments/flags; restoration against the upstream test is required.
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_a = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
_a = self.tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
_a = '''A, <mask> AllenNLP sentence.'''
_a = tokenizer_r.encode_plus(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
_a = tokenizer_p.encode_plus(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
_a = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
_a = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__UpperCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__UpperCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def _UpperCAmelCase ( self ) -> Any:
# Verifies that `add_prefix_space` / `trim_offsets` kwargs are propagated into
# the fast tokenizer's serialized pre_tokenizer and post_processor state.
# NOTE(review): obfuscation damage — results bound to `_a` but read back as
# `tokenizer_r` / `pre_tokenizer_state` / `post_processor_state`, and the
# loop variables are compared against the undefined `__UpperCAmelCase`;
# presumably the original asserted equality with `add_prefix_space` and
# `trim_offsets` — restore before running.
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
_a = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
_a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __UpperCAmelCase )
self.assertEqual(post_processor_state['''add_prefix_space'''] , __UpperCAmelCase )
self.assertEqual(post_processor_state['''trim_offsets'''] , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
# NOTE(review): obfuscation damage in this method — results are bound to `_a`
# but read back as `encoding`/`text`/`text_of_1_token`, the True/False flag
# combinations are replaced by the undefined `__UpperCAmelCase`, and the final
# line ends with dataset-table residue "| 362 |" which is a SyntaxError.
# Restore against the upstream offsets-mapping test before running.
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_a = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
_a = F'{text_of_1_token} {text_of_1_token}'
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__UpperCAmelCase ) + 1, len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__UpperCAmelCase ) + 1, len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__UpperCAmelCase ), len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__UpperCAmelCase ), len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
_a = F' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
#     encoding.offset_mapping[1],
#     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__UpperCAmelCase ) + 1, 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__UpperCAmelCase ), 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__UpperCAmelCase ), 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , ) | 362 |
"""simple docstring"""
def A_ ( _lowerCAmelCase : int = 10_00 ):
"""simple docstring"""
return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1 ) )
if __name__ == "__main__":
print(solution()) | 153 | 0 |
"""simple docstring"""
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def _snake_case ( UpperCamelCase : List[str] ):
if isinstance(UpperCamelCase , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class SCREAMING_SNAKE_CASE__ :
"""Shared mixin exercising TFVisionTextDualEncoderModel: building from configs,
from pretrained sub-models, save/load round-trips and attention shapes.

NOTE(review): obfuscation damage — every method declares all parameters as the
same name `_SCREAMING_SNAKE_CASE` (duplicate argument names are a SyntaxError),
and tuple unpacks such as `UpperCAmelCase , UpperCAmelCase : Optional[Any] = ...`
use an annotated tuple target, which is also invalid Python. The original
distinct names must be restored before this class can run.
"""
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = TFVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
# Both projections must map to the shared projection_dim.
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = TFVisionTextDualEncoderModel(vision_model=_SCREAMING_SNAKE_CASE , text_model=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : int = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = {"""vision_model""": vision_model, """text_model""": text_model}
UpperCAmelCase : List[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
# Save/load round-trip: outputs before and after must match to 1e-5.
UpperCAmelCase , UpperCAmelCase : List[Any] = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = TFVisionTextDualEncoderModel(vision_model=_SCREAMING_SNAKE_CASE , text_model=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = TFVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = after_output[0].numpy()
UpperCAmelCase : int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-5 )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Optional[int] = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = TFVisionTextDualEncoderModel(vision_model=_SCREAMING_SNAKE_CASE , text_model=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = model(
input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = output.vision_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase : Tuple = to_atuple(vision_model.config.image_size )
UpperCAmelCase : Optional[int] = to_atuple(vision_model.config.patch_size )
UpperCAmelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
UpperCAmelCase : Optional[Any] = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
UpperCAmelCase : List[Any] = output.text_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
# Max-absolute-difference helper used by the equivalence checks below.
UpperCAmelCase : str = np.abs((a - b) ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , F"Difference between torch and flax is {diff} (>= {tol})." )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
self.check_save_load(**_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_SCREAMING_SNAKE_CASE )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Optional[int] = self.get_pretrained_model_and_inputs()
UpperCAmelCase : List[Any] = model_a(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = TFVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = model_a(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = after_outputs[0].numpy()
UpperCAmelCase : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-5 )
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , unittest.TestCase ):
"""ViT + BERT concrete case of the dual-encoder test mixin.

NOTE(review): same obfuscation damage as the mixin — duplicated
`_SCREAMING_SNAKE_CASE` parameter names (SyntaxError) and a tuple unpack
below that assigns the single name `UpperCAmelCase` seven times, so the
individual config/inputs values are lost; the original distinct names
must be restored.
"""
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
UpperCAmelCase : int = 13
UpperCAmelCase : Optional[int] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
UpperCAmelCase : Any = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
UpperCAmelCase : int = random_attention_mask([batch_size, 4] )
UpperCAmelCase : List[str] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = TFViTModel(_SCREAMING_SNAKE_CASE , name="""vision_model""" )
UpperCAmelCase : Optional[Any] = TFBertModel(_SCREAMING_SNAKE_CASE , name="""text_model""" )
return vision_model, text_model
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : List[Any] = TFViTModelTester(self )
UpperCAmelCase : int = TFBertModelTester(self )
UpperCAmelCase : str = vit_model_tester.prepare_config_and_inputs()
UpperCAmelCase : int = bert_model_tester.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = vision_config_and_inputs
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) : str = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , unittest.TestCase ):
"""DeiT + RoBERTa concrete case of the dual-encoder test mixin.

Overrides the attention-shape check because DeiT prepends two special tokens
([CLS] and distillation) instead of ViT's one.

NOTE(review): same obfuscation damage as the sibling classes — duplicated
`_SCREAMING_SNAKE_CASE` parameter names (SyntaxError) and a tuple unpack that
reuses the single name `UpperCAmelCase`; restore original names before running.
"""
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
UpperCAmelCase : Union[str, Any] = 13
UpperCAmelCase : Optional[Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
UpperCAmelCase : List[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
UpperCAmelCase : Optional[Any] = random_attention_mask([batch_size, 4] )
UpperCAmelCase : Tuple = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : str = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = TFVisionTextDualEncoderModel(vision_model=_SCREAMING_SNAKE_CASE , text_model=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = model(
input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = output.vision_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCAmelCase : Dict = to_atuple(vision_model.config.image_size )
UpperCAmelCase : Tuple = to_atuple(vision_model.config.patch_size )
UpperCAmelCase : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
UpperCAmelCase : List[str] = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
UpperCAmelCase : List[str] = output.text_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = TFDeiTModel(_SCREAMING_SNAKE_CASE , name="""vision_model""" )
UpperCAmelCase : Union[str, Any] = TFRobertaModel(_SCREAMING_SNAKE_CASE , name="""text_model""" )
return vision_model, text_model
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = TFDeiTModelTester(self )
UpperCAmelCase : List[Any] = TFRobertaModelTester(self )
UpperCAmelCase : List[Any] = vit_model_tester.prepare_config_and_inputs()
UpperCAmelCase : Optional[int] = bert_model_tester.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = vision_config_and_inputs
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) : Any = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , unittest.TestCase ):
"""CLIP vision encoder + BERT concrete case of the dual-encoder test mixin.

NOTE(review): same obfuscation damage as the sibling classes — duplicated
`_SCREAMING_SNAKE_CASE` parameter names (SyntaxError) and a tuple unpack that
reuses the single name `UpperCAmelCase`; restore original names before running.
"""
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : List[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
UpperCAmelCase : Optional[int] = 13
UpperCAmelCase : Union[str, Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
UpperCAmelCase : Optional[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
UpperCAmelCase : Union[str, Any] = random_attention_mask([batch_size, 4] )
UpperCAmelCase : Optional[int] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : str = TFCLIPVisionModel(_SCREAMING_SNAKE_CASE , name="""vision_model""" )
UpperCAmelCase : List[str] = TFBertModel(_SCREAMING_SNAKE_CASE , name="""text_model""" )
return vision_model, text_model
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : Dict = TFCLIPVisionModelTester(self )
UpperCAmelCase : Dict = TFBertModelTester(self )
UpperCAmelCase : str = clip_model_tester.prepare_config_and_inputs()
UpperCAmelCase : List[str] = bert_model_tester.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase : Optional[Any] = vision_config_and_inputs
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) : Dict = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""Slow integration test against the real `clip-italian/clip-italian`
checkpoint: runs two Italian captions against a COCO image and pins the
image-text logits to reference values (atol 1e-3).
"""
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : List[Any] = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
UpperCAmelCase : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCAmelCase : Optional[int] = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors="""np""" )
# NOTE(review): `_SCREAMING_SNAKE_CASE` is undefined in this scope — the
# obfuscation replaced the original literal/variable arguments; restore
# before running.
UpperCAmelCase : Optional[int] = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
UpperCAmelCase : str = np.array([[1.228_4727, 0.310_4122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
| 109 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : int = logging.get_logger(__name__)  # module-level logger
# Map of pretrained GPT-NeoX checkpoint name -> hosted config.json URL.
lowercase__ : List[Any] = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class _UpperCAmelCase ( lowerCAmelCase__):
"""GPT-NeoX model configuration (model_type "gpt_neox").

NOTE(review): obfuscation damage — `__init__` declares every parameter as the
same name `lowercase_` (duplicate argument names are a SyntaxError), every
attribute value is bound to `snake_case_` instead of `self.<attr>`, and the
body then reads names such as `vocab_size` that were never bound. The
original distinct parameter names and `self` assignments must be restored.
"""
_lowerCAmelCase : List[Any] = """gpt_neox"""
def __init__( self : List[str] , lowercase_ : str=50432 , lowercase_ : List[Any]=6144 , lowercase_ : List[Any]=44 , lowercase_ : Union[str, Any]=64 , lowercase_ : List[str]=24576 , lowercase_ : List[Any]="gelu" , lowercase_ : str=0.25 , lowercase_ : Optional[int]=10000 , lowercase_ : Optional[int]=0.0 , lowercase_ : Optional[int]=0.0 , lowercase_ : int=0.1 , lowercase_ : Tuple=2048 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : List[str]=1E-5 , lowercase_ : str=True , lowercase_ : str=0 , lowercase_ : Union[str, Any]=2 , lowercase_ : List[str]=False , lowercase_ : Optional[int]=True , lowercase_ : List[Any]=None , **lowercase_ : Optional[int] , ):
super().__init__(bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
snake_case_ : List[str] = vocab_size
snake_case_ : Optional[Any] = max_position_embeddings
snake_case_ : str = hidden_size
snake_case_ : Dict = num_hidden_layers
snake_case_ : Dict = num_attention_heads
snake_case_ : List[Any] = intermediate_size
snake_case_ : List[Any] = hidden_act
snake_case_ : str = rotary_pct
snake_case_ : Dict = rotary_emb_base
snake_case_ : Optional[int] = attention_dropout
snake_case_ : Tuple = hidden_dropout
snake_case_ : Tuple = classifier_dropout
snake_case_ : List[str] = initializer_range
snake_case_ : Union[str, Any] = layer_norm_eps
snake_case_ : Any = use_cache
snake_case_ : Optional[int] = tie_word_embeddings
snake_case_ : Any = use_parallel_residual
snake_case_ : Union[str, Any] = rope_scaling
self._rope_scaling_validation()
# hidden_size must divide evenly across attention heads.
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
'''The hidden size is not divisble by the number of attention heads! Make sure to update them!''' )
def _snake_case ( self : Optional[int] ):
# Validates the optional `rope_scaling` dict: must contain a `type` in
# {"linear", "dynamic"} and a float `factor` > 1.0; None means disabled.
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowercase_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
f"got {self.rope_scaling}" )
snake_case_ : Any = self.rope_scaling.get('''type''' , lowercase_ )
snake_case_ : Union[str, Any] = self.rope_scaling.get('''factor''' , lowercase_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(lowercase_ , lowercase_ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 264 | 0 |
def _a ( SCREAMING_SNAKE_CASE : list ):
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE ) <= 1:
return lst
UpperCamelCase__ : Union[str, Any] = 1
while i < len(SCREAMING_SNAKE_CASE ):
if lst[i - 1] <= lst[i]:
i += 1
else:
UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = lst[i], lst[i - 1]
i -= 1
if i == 0:
UpperCamelCase__ : Union[str, Any] = 1
return lst
if __name__ == "__main__":
__UpperCamelCase : List[Any] = input("Enter numbers separated by a comma:\n").strip()
__UpperCamelCase : int = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
| 51 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __magic_name__ ( __lowerCAmelCase):
"""Dataset reader for plain-text files, delegating to the packaged `Text`
builder; `read()` returns either a streaming or a fully-prepared dataset.

NOTE(review): obfuscation damage — `__init__` declares every parameter as the
same name `lowerCamelCase__` (duplicate argument names are a SyntaxError), and
in `read()` all four download/verification options are bound to the single
name `UpperCamelCase__` while the call reads the original option names.
Restore the original distinct names before use.
"""
def __init__( self : Dict , lowerCamelCase__ : NestedDataStructureLike[PathLike] , lowerCamelCase__ : Optional[NamedSplit] = None , lowerCamelCase__ : Optional[Features] = None , lowerCamelCase__ : str = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : Optional[int] = None , **lowerCamelCase__ : Tuple , ) -> Any:
'''simple docstring'''
super().__init__(
lowerCamelCase__ , split=lowerCamelCase__ , features=lowerCamelCase__ , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ , streaming=lowerCamelCase__ , num_proc=lowerCamelCase__ , **lowerCamelCase__ , )
# Normalize a bare path into a {split: path} mapping.
UpperCamelCase__ : Optional[Any] = path_or_paths if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else {self.split: path_or_paths}
UpperCamelCase__ : Optional[Any] = Text(
cache_dir=lowerCamelCase__ , data_files=lowerCamelCase__ , features=lowerCamelCase__ , **lowerCamelCase__ , )
def UpperCAmelCase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
if self.streaming:
UpperCamelCase__ : Any = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
UpperCamelCase__ : Union[str, Any] = None
UpperCamelCase__ : List[str] = None
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : Tuple = None
self.builder.download_and_prepare(
download_config=lowerCamelCase__ , download_mode=lowerCamelCase__ , verification_mode=lowerCamelCase__ , base_path=lowerCamelCase__ , num_proc=self.num_proc , )
UpperCamelCase__ : Tuple = self.builder.as_dataset(
split=self.split , verification_mode=lowerCamelCase__ , in_memory=self.keep_in_memory )
return dataset
| 51 | 1 |
"""simple docstring"""
import functools
from typing import Any
def UpperCAmelCase(string: str, words: list) -> bool:
    """Return True if *string* can be segmented into a sequence of words from *words*.

    Classic "word break": build a trie over the word list, then run a memoized DFS
    over start indices of *string*.

    Raises:
        ValueError: if *string* is not a non-empty str, or *words* is not a list of
            non-empty strings.

    BUG FIX: the obfuscated original used one placeholder name for both parameters
    (a SyntaxError) and bound the trie, its sentinel key, and every cursor to the
    same local, destroying the algorithm.
    """
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError('the string should be not empty string')
    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError('the words should be a list of non-empty strings')

    # Build trie; a sentinel key marks nodes that terminate a complete word.
    trie: dict = {}
    word_keeper_key = 'WORD_KEEPER'
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method: memoized over the start index.
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True
        return False

    return is_breakable(0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 69 | """simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main() -> None:
    """CLI entry point for the TensorFlow benchmark.

    Parses ``TensorFlowBenchmarkArguments`` from the command line and runs the
    benchmark, translating deprecated ``--no_*`` flags into a helpful error.

    BUG FIX: the original def was garbled to a placeholder name while the
    ``__main__`` guard calls ``main()``, and ``HfArgumentParser`` was given the
    placeholder instead of the imported ``TensorFlowBenchmarkArguments``.
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # NOTE(review): eval on the parser's error text — trusted local input, but
        # fragile; consider ast.literal_eval if this is ever revisited.
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
| 69 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _SCREAMING_SNAKE_CASE ( ProcessorMixin ):
    """CLIP processor wrapping a CLIP image processor and a CLIP tokenizer.

    BUG FIX: the original inherited from an undefined placeholder, bound all three
    class attributes to the same colliding name, gave five different methods the
    same name ``_A`` (later defs silently clobbered earlier ones), and referenced
    the undefined placeholder ``_lowerCAmelCase`` throughout. Attribute and method
    names below follow the ``ProcessorMixin`` contract the body clearly targets.
    """

    # ProcessorMixin wiring: which attributes exist and which classes build them.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize *text* and/or preprocess *images*; at least one is required."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 354 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

# model_type -> image-processor class name. BUG FIX: the three module-level
# bindings below were all garbled to the same placeholder, leaving the names
# referenced elsewhere in this file (logger, IMAGE_PROCESSOR_MAPPING_NAMES,
# IMAGE_PROCESSOR_MAPPING) unbound; the duplicated 'mobilevit' entry is removed.
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
    [
        ("align", "EfficientNetImageProcessor"),
        ("beit", "BeitImageProcessor"),
        ("bit", "BitImageProcessor"),
        ("blip", "BlipImageProcessor"),
        ("blip-2", "BlipImageProcessor"),
        ("bridgetower", "BridgeTowerImageProcessor"),
        ("chinese_clip", "ChineseCLIPImageProcessor"),
        ("clip", "CLIPImageProcessor"),
        ("clipseg", "ViTImageProcessor"),
        ("conditional_detr", "ConditionalDetrImageProcessor"),
        ("convnext", "ConvNextImageProcessor"),
        ("convnextv2", "ConvNextImageProcessor"),
        ("cvt", "ConvNextImageProcessor"),
        ("data2vec-vision", "BeitImageProcessor"),
        ("deformable_detr", "DeformableDetrImageProcessor"),
        ("deit", "DeiTImageProcessor"),
        ("deta", "DetaImageProcessor"),
        ("detr", "DetrImageProcessor"),
        ("dinat", "ViTImageProcessor"),
        ("donut-swin", "DonutImageProcessor"),
        ("dpt", "DPTImageProcessor"),
        ("efficientformer", "EfficientFormerImageProcessor"),
        ("efficientnet", "EfficientNetImageProcessor"),
        ("flava", "FlavaImageProcessor"),
        ("focalnet", "BitImageProcessor"),
        ("git", "CLIPImageProcessor"),
        ("glpn", "GLPNImageProcessor"),
        ("groupvit", "CLIPImageProcessor"),
        ("imagegpt", "ImageGPTImageProcessor"),
        ("instructblip", "BlipImageProcessor"),
        ("layoutlmv2", "LayoutLMv2ImageProcessor"),
        ("layoutlmv3", "LayoutLMv3ImageProcessor"),
        ("levit", "LevitImageProcessor"),
        ("mask2former", "Mask2FormerImageProcessor"),
        ("maskformer", "MaskFormerImageProcessor"),
        ("mgp-str", "ViTImageProcessor"),
        ("mobilenet_v1", "MobileNetV1ImageProcessor"),
        ("mobilenet_v2", "MobileNetV2ImageProcessor"),
        ("mobilevit", "MobileViTImageProcessor"),
        ("mobilevitv2", "MobileViTImageProcessor"),
        ("nat", "ViTImageProcessor"),
        ("oneformer", "OneFormerImageProcessor"),
        ("owlvit", "OwlViTImageProcessor"),
        ("perceiver", "PerceiverImageProcessor"),
        ("pix2struct", "Pix2StructImageProcessor"),
        ("poolformer", "PoolFormerImageProcessor"),
        ("regnet", "ConvNextImageProcessor"),
        ("resnet", "ConvNextImageProcessor"),
        ("sam", "SamImageProcessor"),
        ("segformer", "SegformerImageProcessor"),
        ("swiftformer", "ViTImageProcessor"),
        ("swin", "ViTImageProcessor"),
        ("swin2sr", "Swin2SRImageProcessor"),
        ("swinv2", "ViTImageProcessor"),
        ("table-transformer", "DetrImageProcessor"),
        ("timesformer", "VideoMAEImageProcessor"),
        ("tvlt", "TvltImageProcessor"),
        ("upernet", "SegformerImageProcessor"),
        ("van", "ConvNextImageProcessor"),
        ("videomae", "VideoMAEImageProcessor"),
        ("vilt", "ViltImageProcessor"),
        ("vit", "ViTImageProcessor"),
        ("vit_hybrid", "ViTHybridImageProcessor"),
        ("vit_mae", "ViTImageProcessor"),
        ("vit_msn", "ViTImageProcessor"),
        ("xclip", "CLIPImageProcessor"),
        ("yolos", "YolosImageProcessor"),
    ]
)

IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    """Resolve an image-processor class from its name, or return None.

    Looks in the known model modules first, then in dynamically registered extra
    content, and finally falls back to the main ``transformers`` init (which holds
    dummies that raise an informative import error when a dependency is missing).

    BUG FIX: the original def was garbled to a name different from the one this
    file calls (``image_processor_class_from_name``), and every distinct local was
    collapsed into a single placeholder.
    """
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
) -> Dict:
    """Load the image-processor configuration dict for a model repo or local dir.

    Returns ``{}`` when no image-processor config file is found (the caller then
    falls back to the model config).

    BUG FIX: the original reused one placeholder for every parameter (a
    SyntaxError) and shadowed the other top-level function's garbled name.
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    """Factory that instantiates the correct image processor for a checkpoint.

    BUG FIX: the original class name was a placeholder colliding with another
    class in this file, both methods were named ``_A`` (the second clobbered the
    first), and every argument/local reference was the undefined placeholder
    ``__lowerCamelCase``. Names are restored from the class's own error message
    ("AutoImageProcessor ... from_pretrained") and the ``IMAGE_PROCESSOR_MAPPING.register``
    call.
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate an image processor from a repo id or local directory."""
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        """Register a new (config class -> image processor class) pair."""
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
| 62 | 0 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    """Unit tests for ``RealmRetriever``.

    BUG FIX: every method was garbled to the same name ``__magic_name__`` and all
    distinct locals/attributes to ``__a``; names are restored from the internal
    ``self.get_tokenizer`` / ``self.get_config`` / ``self.get_dummy_*`` calls the
    bodies make, and the base class from the imported ``TestCase``.
    """

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        # dtype=object: ragged byte strings cannot share a fixed-width dtype.
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever

    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )

    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            # NOTE(review): the original dropped this assignment target; routing the
            # mocked download to the local file is the only use of the patch context.
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
| 218 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph: dict) -> bool:
    """Return True if *graph* (adjacency list keyed 0..n-1) is bipartite.

    Two-colors the graph with DFS, then verifies no edge joins same-colored
    vertices.

    BUG FIX: the original def name did not match the call site below, and both
    the ``visited`` and ``color`` arrays were bound to the same placeholder.
    """
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v: int, c: int) -> None:
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # Color every component (graph may be disconnected).
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # An edge between equal colors means an odd cycle -> not bipartite.
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 218 | 1 |
from copy import deepcopy
class lowercase:
    """Binary Indexed (Fenwick) Tree over a 0-indexed array; slot 0 is stored flat.

    BUG FIX: every method was garbled to the same name ``_SCREAMING_SNAKE_CASE``
    and all locals to one placeholder; method names are restored from the
    internal calls the bodies make (``self.init``, ``self.next_``, ``self.prev``,
    ``self.add``, ``self.get``, ``self.prefix``, ``self.query``).
    """

    def __init__(self, arr: "list[int] | None" = None, size: "int | None" = None) -> None:
        """Build from an existing array, or as zeros of a given size."""
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: "list[int]") -> None:
        """(Re)initialize the tree from *arr* in O(n)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> "list[int]":
        """Recover the original array from the tree in O(n)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        # Add the lowest set bit: parent in the update direction.
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        # Strip the lowest set bit: parent in the query direction.
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        """Add *value* to element *index* in O(log n)."""
        if index == 0:
            # Element 0 is kept outside the BIT structure.
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        """Set element *index* to *value*."""
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        """Sum of elements [0, right) in O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        """Sum of elements [left, right)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        """Value of the single element at *index*."""
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        """Largest index i with prefix(i + 1) <= value, or -1 if none."""
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 357 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()


class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast (tiny-model) checks for ``KandinskyInpaintPipeline``.

    BUG FIX: the original class name was a placeholder colliding with another
    class in this file, every method/property shared one garbled name, and bodies
    referenced undefined placeholders (``__UpperCAmelCase``) and garbled numpy
    dtypes (``np.uinta``/``np.floataa`` for uint8/float32). Property names are
    restored from the internal ``self.*`` references; class-attribute names
    follow the ``PipelineTesterMixin`` contract — confirm against that mixin.
    """

    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        # NOTE(review): name reconstructed; not referenced elsewhere in this chunk.
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1_005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        # NOTE(review): boolean scheduler flags were garbled placeholders in the
        # original; False matches the standard Kandinsky DDIM setup — confirm.
        scheduler = DDIMScheduler(
            num_train_timesteps=1_000,
            beta_schedule="linear",
            beta_start=0.00_085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        # NOTE(review): the original dropped this assignment target; zeroing a
        # corner region is the reconstructed intent — confirm.
        mask[:16, :16] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test for ``KandinskyInpaintPipeline``.

    BUG FIX: the original class/method names were colliding placeholders, dtypes
    were garbled (``torch.floataa`` for float16), and target devices/assignment
    targets were undefined placeholders; ``torch_device`` is the imported helper.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        # NOTE(review): the original dropped this assignment target; masking the
        # cat's head region is the reconstructed intent — confirm.
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 277 | 0 |
"""Lazy import structure for the Pix2Struct model components."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Maps submodule name -> names it exports; consumed by _LazyModule below.
# (Bug fix: this dict was assigned to a throwaway name and then clobbered,
# while `_LazyModule` referenced an undefined `_import_structure`.)
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

# The image processor needs the vision extras (PIL); register only if present.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

# Modeling classes need torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pixastruct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PixaStructConfig,
        PixaStructTextConfig,
        PixaStructVisionConfig,
    )
    from .processing_pixastruct import PixaStructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pixastruct import PixaStructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pixastruct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            PixaStructForConditionalGeneration,
            PixaStructPreTrainedModel,
            PixaStructTextModel,
            PixaStructVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 254 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_ta import TaTokenizer
else:
    # Slow tokenizer is unavailable without sentencepiece.
    # (Bug fix: this fallback was assigned to a throwaway name, leaving the
    # `TaTokenizer` referenced by the class below undefined.)
    TaTokenizer = None

logger = logging.get_logger(__name__)

# Canonical on-disk file names for the tokenizer assets.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
    },
}


# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}
class _A ( __SCREAMING_SNAKE_CASE ):
    """Fast (Rust-backed) T5 tokenizer.

    NOTE(review): identifiers in this file were machine-mangled — every
    `__init__` parameter shared one name (a SyntaxError) and all `self.`
    assignment targets were lost; restored from the references in the bodies.
    The base class alias `__SCREAMING_SNAKE_CASE` is defined elsewhere in the
    file (presumably PreTrainedTokenizerFast — verify).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = TaTokenizer

    # Tokens prepended to every encoded sequence (none for T5).
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        # Saving the slow vocab is only possible when a sentencepiece model exists.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        """Backwards-compat shim: warn when the deprecated 512 default is used."""
        if pretrained_model_name_or_path in _A.max_model_input_sizes:
            deprecated_max_model_length = _A.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Copy the sentencepiece model into `save_directory`."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None) -> List[int]:
        """Append EOS to each sequence: `X </s>` or `A </s> B </s>`."""
        token_ids_a = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            token_ids_b = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_b=None) -> List[int]:
        """T5 does not use token types: all zeros, sized like the final input."""
        eos = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos) * [0]
        return len(token_ids_a + eos + token_ids_b + eos) * [0]

    def get_sentinel_tokens(self):
        """All `<extra_id_N>` sentinel tokens currently registered."""
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        """Ids of the sentinel tokens returned by get_sentinel_tokens()."""
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
| 254 | 1 |
import os
from datetime import datetime as dt
from github import Github
# Issues carrying any of these labels are exempt from the stale-bot sweep.
# (Bug fix: the list was assigned to a throwaway name while the loop below
# reads `LABELS_TO_EXEMPT`.)
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]
def main():
    """Stale-bot sweep over open huggingface/diffusers issues.

    Rules: close issues 7+ days after a stale notice, un-stale issues a human
    replied to, and post a stale notice after 23 days of inactivity.
    Requires a GITHUB_TOKEN environment variable.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first.  (Bug fix: the sort key previously referenced
        # an undefined name instead of the lambda's own parameter.)
        comments = sorted(issue.get_comments(), key=lambda c: c.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored.")
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
| 42 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
# Benchmark sizing and result-file location.
# (Bug fix: these were assigned to throwaway names while the code below reads
# SPEED_TEST_N_EXAMPLES, SMALL_TEST, RESULTS_BASEPATH, RESULTS_FILENAME.)
SPEED_TEST_N_EXAMPLES = 5_00_00
SMALL_TEST = 50_00

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset, length):
    """Sequential single-example reads."""
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset, length, batch_size):
    """Sequential batched reads over the whole dataset."""
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset, length, type):
    """Single-example reads under a given output format (numpy/torch/...)."""
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset, length, batch_size, type):
    """Batched reads under a given output format."""
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    """Time every read pattern on a generated dataset (before and after
    shuffling) and dump the timings to RESULTS_FILE_PATH as JSON."""
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]

    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            # NOTE(review): the assignment target was lost in this file; keyed
            # by function name + args so repeated configs stay distinguishable.
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
| 42 | 1 |
"""Lazy import structure for the ONNX export utilities."""
from typing import TYPE_CHECKING

from ..utils import _LazyModule


# Maps submodule name -> names it exports; consumed by _LazyModule below.
# (Bug fix: this dict was assigned to a throwaway name while `_LazyModule`
# referenced an undefined `_import_structure`.)
_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}


if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeqaSeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size

else:
    import sys

    # Replace this module with a lazy proxy so the heavy ONNX deps load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 318 |
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)

# NOTE(review): both constants here were assigned to the same throwaway name;
# this string looks like the usual `_CONFIG_FOR_DOC` docstring helper used by
# transformers modeling files -- confirm against the original module.
_CONFIG_FOR_DOC = "T5Config"
def lowercase_(input_ids, pad_token_id, decoder_start_token_id) -> jnp.ndarray:
    """Shift `input_ids` one position to the right for decoder teacher forcing.

    The first column becomes `decoder_start_token_id` and every label of -100
    (the loss-ignore marker) is replaced with `pad_token_id`.

    (Bug fix: all three parameters shared one name — a SyntaxError — and the
    intermediate results were bound to a name the next line never read.)
    """
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    # Replace ignored-label positions with the pad token.
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
class FlaxMTaModel(FlaxTaModel):
    """Flax mT5 model: T5 architecture with the mT5 config/vocabulary.

    NOTE(review): the three classes in this file all carried one mangled name
    and an undefined base alias; names restored from the T5 imports at the
    top of the file and the standard mT5 layout -- confirm ordering.
    """

    model_type = "mt5"
    config_class = MTaConfig
class FlaxMTaEncoderModel(FlaxTaEncoderModel):
    """Flax mT5 encoder-only model (mT5 config on the T5 encoder).

    NOTE(review): class/base names reconstructed from the module's imports;
    the originals were machine-mangled into colliding identifiers.
    """

    model_type = "mt5"
    config_class = MTaConfig
class FlaxMTaForConditionalGeneration(FlaxTaForConditionalGeneration):
    """Flax mT5 model with an LM head for conditional generation.

    NOTE(review): class/base names reconstructed from the module's imports;
    the originals were machine-mangled into colliding identifiers.
    """

    model_type = "mt5"
    config_class = MTaConfig
| 318 | 1 |
import math
def fx(x: float, a: float) -> float:
    """f(x) = x^2 - a; its positive root is sqrt(a)."""
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    """f'(x) = 2x."""
    return 2 * x


def get_initial_point(a: float) -> float:
    """Pick a Newton starting point >= sqrt(a) by repeated squaring from 2.0."""
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00_000_000_000_001
) -> float:
    """Square root of `a` by Newton-Raphson iteration.

    Raises ValueError for negative input.  (Bug fix: the four functions in
    this file all shared one name and referenced undefined locals, so the
    helpers below could never be called.)
    """
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        # Stop once successive iterates agree to within tolerance.
        if abs(prev_value - value) < tolerance:
            return value

    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Return True if `phone` is a valid Sri Lankan mobile number.

    Accepted prefixes: 0 / 94 / +94 / 0094, followed by 7, an operator digit
    (0-2, 4-8), an optional separator ('-' or ' '), and seven digits.

    (Bug fix: the compiled pattern was bound to a throwaway name and the
    search then used the phone string as the pattern, matching everything.)
    """
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowerCamelCase_ : Any = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : int = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
lowerCamelCase_ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 286 |
"""simple docstring"""
import qiskit
def half_adder(bita: int, bitb: int):
    """Simulate a quantum half adder for two input bits.

    Returns the measurement histogram (counts dict) from 1000 shots; qubit 0
    of the result is the XOR (sum) bit, qubit 1 the AND (carry) bit.

    (Bug fix: both parameters shared one name — a SyntaxError — and the
    circuit/backend locals were bound to names never read again.)
    """
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0)
    if bitb == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(F"Half Adder Output Qubit Counts: {counts}")
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( _UpperCamelCase : List[str], _UpperCamelCase : Dict, _UpperCamelCase : Optional[int], ) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 361 | '''simple docstring'''
def _UpperCAmelCase ( _UpperCamelCase : float, _UpperCamelCase : list[float] ) -> float:
if discount_rate < 0:
raise ValueError('''Discount rate cannot be negative''' )
if not cash_flows:
raise ValueError('''Cash flows list cannot be empty''' )
A_ = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(_UpperCamelCase ) )
return round(_UpperCamelCase, ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 0 |
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """ITU-R 601 luma grayscale conversion of an (H, W, 3) RGB array.

    (Bug fix: the parameter was machine-renamed while the body read `rgb`;
    the function name is restored from the call in the __main__ block.)
    """
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Boolean threshold mask: True where a pixel value lies in (127, 255].

    (Bug fix: the parameter was machine-renamed while the body read `gray`.)
    """
    return (gray > 127) & (gray <= 255)
def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation of a binary `image` by `kernel`.

    Each output pixel is 1 if any set kernel element overlaps a set pixel in
    the kernel-sized window around it, else 0.

    (Bug fix: both parameters shared one name — a SyntaxError — and the
    padded-copy / output assignment targets were lost; restored below.)
    """
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image to padded image
    # NOTE(review): offset reconstructed as the centered placement for an odd
    # kernel (kernel.shape - 2 : -1) -- confirm for even-sized kernels.
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
if __name__ == "__main__":
# read original image
lowerCamelCase__ : Dict = Path(__file__).resolve().parent / '''image_data''' / '''lena.jpg'''
lowerCamelCase__ : int = np.array(Image.open(lena_path))
# kernel to be applied
lowerCamelCase__ : List[str] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
lowerCamelCase__ : Optional[int] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
lowerCamelCase__ : List[str] = Image.fromarray(output).convert('''RGB''')
pil_img.save('''result_dilation.png''')
| 246 |
"""simple docstring"""
def binomial_coefficient(n: int, k: int) -> int:
    """C(n, k) by the multiplicative formula, exploiting C(n, k) == C(n, n-k).

    (Bug fix: the four functions in this file shared one mangled name and had
    duplicate parameter names; restored from the references in the bodies.)
    """
    result = 1  # To kept the Calculated Value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Catalan number: count of binary search tree shapes on node_count nodes."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    """n! for n >= 0; raises ValueError for negative input."""
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Number of labeled binary trees: Catalan(n) * n!."""
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
lowerCamelCase__ : Optional[Any] = int(input('''Enter the number of nodes: ''').strip() or 0)
if node_count <= 0:
raise ValueError('''We need some nodes to work with.''')
print(
F'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
F'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
| 246 | 1 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
def is_in_circle(_UpperCamelCase , _UpperCamelCase ) -> bool:
__lowerCAmelCase = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
__lowerCAmelCase = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(_UpperCamelCase ) )
# The ratio of the area for circle to square is pi/4.
__lowerCAmelCase = proportion * 4
print(f"The estimated value of pi is {pi_estimate}" )
print(f"The numpy value of pi is {pi}" )
print(f"The total error is {abs(pi - pi_estimate )}" )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0.0 , _UpperCamelCase = 1.0 , ):
'''simple docstring'''
return mean(
function_to_integrate(uniform(_UpperCamelCase , _UpperCamelCase ) ) for _ in range(_UpperCamelCase ) ) * (max_value - min_value)
def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
):
    """Sanity-check the estimator against f(x) = x, whose area is known
    exactly, and print the estimated vs expected values.

    (Bug fix: the estimate and the exact value were bound to throwaway names
    while the prints below read estimated_value/expected_value.)
    """

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")
def pi_estimator_using_area_under_curve(iterations: int):
    """Estimate pi as the area under sqrt(4 - x^2) on [0, 2] (a quarter of a
    radius-2 circle) and print the result against math.pi.

    (Bug fix: the estimate was bound to a throwaway name while the prints
    below read estimated_value.)
    """

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 259 |
"""simple docstring"""
from __future__ import annotations
import time
# Type alias for a path: list of (y, x) coordinates.
# (Bug fix: alias/grid/delta were all assigned to the same throwaway name
# while the classes below read `grid` and `delta`.)
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    """A search-tree node: grid position, goal position and parent link.

    (Bug fix: all five __init__ parameters shared one mangled name — a
    SyntaxError — and the `self.` assignment targets were lost; restored
    from the attribute reads elsewhere in the file.)
    """

    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        # Positions are stored (y, x) to match the grid's row-major indexing.
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    """Uninformed BFS over the module-level `grid`, from `start` to `goal`
    (both given as (y, x) tuples).

    (Bug fix: the three classes in this file shared one mangled name, the
    methods shared another, and `self.` assignment targets were lost;
    restored from the references in the bodies and the __main__ block.)
    """

    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        # FIFO frontier.
        self.node_queue = [self.start]
        self.reached = False

    def search(self):
        """Expand FIFO until the target position is dequeued; return the path
        as a list of (y, x), or [start] when no path exists."""
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent):
        """In-bounds, non-obstacle neighbours of `parent` (up/left/down/right)."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                # NOTE(review): goal args are passed as (target.pos_y,
                # target.pos_x), i.e. swapped relative to Node's (goal_x,
                # goal_y) order -- preserved from the source; confirm intent.
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node):
        """Walk parent links back from `node`; return the path start -> node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    """Two simultaneous BFS frontiers (start->goal and goal->start) that stop
    when their expansions meet on the same cell.

    (Bug fix: class/method names and `self.` assignment targets were
    machine-mangled away; restored from the references in the bodies.)
    """

    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self):
        """Alternate one expansion per direction; on meeting, splice the two
        half-paths.  Returns [start] when no path exists."""
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )

            # Steer each frontier toward the other's newest node.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node, bwd_node):
        """Join the forward path with the reversed backward path (dropping the
        duplicated meeting cell)."""
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
A : List[Any] = (0, 0)
A : Union[str, Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
A : Any = time.time()
A : Dict = BreadthFirstSearch(init, goal)
A : Any = bfs.search()
A : List[str] = time.time() - start_bfs_time
print("Unidirectional BFS computation time : ", bfs_time)
A : Optional[Any] = time.time()
A : Optional[int] = BidirectionalBreadthFirstSearch(init, goal)
A : Any = bd_bfs.search()
A : str = time.time() - start_bd_bfs_time
print("Bidirectional BFS computation time : ", bd_bfs_time)
| 259 | 1 |
"""Project Euler 65: digit sum of the numerator of the 100th convergent of
the continued fraction of e = [2; 1, 2, 1, 1, 4, 1, 1, 6, 1, ...]."""


def sum_digits(num: int) -> int:
    """Sum of the decimal digits of a non-negative integer.

    (Bug fix: the parameter was machine-renamed while the body read `num`,
    and both functions in this file shared one mangled name.)
    """
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Digit sum of the numerator of the max_n-th convergent of e."""
    pre_numerator = 1
    cur_numerator = 2  # numerator of the first convergent (a1 = 2)

    for i in range(2, max_n + 1):
        temp = pre_numerator
        # Continued-fraction term: 2k at positions 3k, otherwise 1.
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 80 |
"""Lazy import structure for the CLAP model components."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps submodule name -> names it exports; consumed by _LazyModule below.
# (Bug fix: the dict was assigned to a throwaway name and then clobbered by
# the torch-only lists, and `_LazyModule` referenced an undefined
# `_import_structure`.)
_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}

# Modeling classes and the feature extractor need torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_clap import (
        CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
        ClapAudioConfig,
        ClapConfig,
        ClapTextConfig,
    )
    from .processing_clap import ClapProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clap import ClapFeatureExtractor
        from .modeling_clap import (
            CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClapAudioModel,
            ClapAudioModelWithProjection,
            ClapModel,
            ClapPreTrainedModel,
            ClapTextModel,
            ClapTextModelWithProjection,
        )

else:
    import sys

    # Replace this module with a lazy proxy so torch loads on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    """Greatest common divisor of a and b via recursive Euclid's algorithm.

    (Bug fix: both parameters shared one mangled name — a SyntaxError — and
    the recursive call's second argument was lost; the function name is
    restored from the call inside the cipher class below.)
    """
    return b if a == 0 else greatest_common_divisor(b % a, a)
class lowerCamelCase:
    """Hill cipher over the 36-character alphabet A-Z followed by 0-9.

    Text is processed in blocks of ``break_key`` characters; each block is
    mapped to a numeric vector, multiplied by the key matrix, and reduced
    modulo 36.
    """

    # The 36-symbol alphabet used by this cipher (A-Z then 0-9).
    key_string = string.ascii_uppercase + string.digits
    # Element-wise "mod 36" for numpy arrays: keeps values inside the alphabet.
    modulus = numpy.vectorize(lambda x: x % 36)
    # Element-wise rounding used to turn float matrix entries back into ints.
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        """Store the key matrix (reduced mod 36) and validate its determinant.

        :param encrypt_key: square integer key matrix; its order defines the
            block size ``break_key``.
        :raises ValueError: if det(key) is not coprime with 36 (not invertible).
        """
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        """Map an alphabet character to its numeric index (0-35)."""
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        """Map a numeric index (possibly a float) back to its character."""
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        """Raise ValueError unless det(key) is invertible modulo 36."""
        # Local import keeps the class self-contained (no module-level helper needed).
        from math import gcd

        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if gcd(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        """Upper-case, drop out-of-alphabet chars and pad to a block multiple.

        Padding repeats the last remaining character; empty input (after
        filtering) raises IndexError, matching the original behaviour.
        """
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        """Encrypt ``text`` block-by-block with the key matrix (mod 36)."""
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        """Return the inverse of the key matrix modulo 36 as an int matrix."""
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        # adj(K) = det(K) * inv(K); multiplying by det_inv yields inv(K) mod 36.
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        """Decrypt ``text`` block-by-block with the modular inverse key."""
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
def main() -> None:
    """Interactive driver: read a key matrix, then encrypt or decrypt text."""
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    # `lowerCamelCase` is the Hill cipher class defined above in this module.
    hc = lowerCamelCase(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 371 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase(ProcessorMixin):
    r"""
    Processor that wraps a FLAVA image processor and a BERT tokenizer into a
    single processor exposing the combined functionality of both.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """Build the processor from an image processor and a tokenizer.

        Accepts the deprecated ``feature_extractor`` kwarg as a fallback for
        ``image_processor``.
        """
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        """Prepare text and/or images for the model.

        Text-related kwargs are forwarded to the tokenizer, image-related
        kwargs to the image processor; when both modalities are given the two
        encodings are merged into one ``BatchEncoding``.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Union of tokenizer and image-processor input names (order kept)."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        """Deprecated alias for ``image_processor_class``."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for ``image_processor``."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 136 | 0 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
# Map of pretrained MaskFormer checkpoints to their hosted config files.
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

# Module-level logger (transformers' logging wrapper).
logger = logging.get_logger(__name__)
class snake_case__(PretrainedConfig):
    r"""
    Configuration class for a MaskFormer model: stores the backbone and
    transformer-decoder configurations plus the matcher/loss hyper-parameters.
    """

    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        """Build the config, falling back to default Swin/DETR sub-configs.

        :raises ValueError: if the decoder model type is not supported.
        """
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            # get_logger is called inline so this block does not depend on a
            # module-level `logger` binding.
            logging.get_logger(__name__).warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        """Instantiate a config from a backbone config and a decoder config."""
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serialize this instance (including nested configs) to a plain dict."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 107 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale an absolute (x0, y0, x1, y1) box into the 0-1000 range.

    x coordinates are scaled by 1000/width, y coordinates by 1000/height,
    then truncated to ints (the format LayoutLM-style models expect).
    """
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Run Tesseract OCR on a document image.

    :param image: document image as a numpy array.
    :param lang: language passed to Tesseract (``None`` lets it default).
    :param tesseract_config: extra CLI flags for Tesseract.
    :return: ``(words, normalized_boxes)`` where boxes are in the 0-1000
        (x0, y0, x1, y1) format.
    """
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])

    # finally, normalize the bounding boxes into the 0-1000 range
    # (inlined so this function does not depend on a sibling helper)
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(
            [
                int(1000 * (box[0] / image_width)),
                int(1000 * (box[1] / image_height)),
                int(1000 * (box[2] / image_width)),
                int(1000 * (box[3] / image_height)),
            ]
        )

    # explicit check instead of `assert`, which is stripped under `python -O`
    if len(words) != len(normalized_boxes):
        raise ValueError("Not as many words as there are bounding boxes")

    return words, normalized_boxes
class snake_case__(BaseImageProcessor):
    r"""
    Image processor for LayoutLM-style document models: optionally resizes
    images, runs Tesseract OCR to extract words/boxes, and flips channels from
    RGB to BGR (as Detectron2 requires).
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        """Store preprocessing defaults; ``size`` defaults to 224x224."""
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        # `resize` here is the module-level transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess a batch of document images into a ``BatchFeature``.

        Per-call arguments override the instance defaults; when ``apply_ocr``
        is enabled the returned feature also contains ``words`` and ``boxes``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 107 | 1 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    """Builds small ViT configs/inputs and shared checks for the Flax tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return a small ViTConfig plus random pixel values."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        """Check the base model's output shape."""
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        """Check the classification head's logits shape (RGB and greyscale)."""
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) for the shared mixin tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Flax ViT model tests (config, forward, JIT, pretrained loading)."""

    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
| 201 |
from __future__ import annotations
from collections import deque
class lowerCAmelCase:
    """Aho-Corasick automaton for simultaneous multi-keyword string search.

    The automaton is stored as ``adlist``: a list of nodes, each a dict with
    ``value`` (edge character), ``next_states`` (child indices),
    ``fail_state`` (failure-link index) and ``output`` (keywords ending here).
    """

    def __init__(self, keywords: list[str]) -> None:
        """Build the trie for ``keywords`` and compute failure transitions."""
        self.adlist: list[dict] = []
        # Node 0 is the root.
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        """Return the child of ``current_state`` reached by ``char``, or None."""
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        """Insert ``keyword`` into the trie, creating nodes as needed."""
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        """BFS over the trie to set failure links and merge output sets."""
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                # Follow failure links until a state with a matching edge (or root).
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                # Inherit the outputs of the failure target.
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """Return {keyword: [start indices]} for every keyword found in ``string``."""
        result: dict[str, list[int]] = {}
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 201 | 1 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    """Builds small ConvNext-backboned UperNet configs/inputs for the tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random inputs."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        """Return a small ConvNext backbone configuration."""
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        """Return an UperNetConfig wrapping the small backbone config."""
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        """Check the segmentation head's logits shape."""
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) for the shared mixin tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( a_ , a_ , unittest.TestCase ):
A : Dict = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
A : List[str] = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
A : Any = False
A : Dict = False
A : List[Any] = False
A : str = False
A : Tuple = False
A : List[Any] = False
def __lowerCamelCase ( self ):
lowercase : str = UperNetModelTester(self )
lowercase : Any = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def __lowerCamelCase ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ):
return
def __lowerCamelCase ( self ):
lowercase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Optional[int] = model_class(lowercase_ )
lowercase : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : List[str] = [*signature.parameters.keys()]
lowercase : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase_ )
def __lowerCamelCase ( self ):
lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowercase_ )
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def __lowerCamelCase ( self ):
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def __lowerCamelCase ( self ):
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def __lowerCamelCase ( self ):
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def __lowerCamelCase ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __lowerCamelCase ( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[Any] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase : int = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : str = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase : Union[str, Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
    def __lowerCamelCase ( self ):
        # With a zero-init config, every trainable parameter's mean (rounded
        # to 9 decimals) must be exactly 0.0 or 1.0; anything else points to a
        # module missed by `_init_weights`. The backbone sub-config is
        # zero-initialized separately.
        lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase : Dict = _config_zero_init(lowercase_ )
        lowercase : str = _config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            lowercase : Any = model_class(config=lowercase_ )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
    @unittest.skip(reason='''UperNet does not have tied weights''' )
    def __lowerCamelCase ( self ):
        pass
    @slow
    def __lowerCamelCase ( self ):
        # Smoke-test: the first published checkpoint loads from the hub.
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase : Union[str, Any] = UperNetForSemanticSegmentation.from_pretrained(lowercase_ )
            self.assertIsNotNone(lowercase_ )
def __lowercase ( ) ->Any:
    """Download the ADE20k fixture image from the hub and return it as an RGB PIL image.

    Used by the slow integration tests below as a deterministic input.

    Returns:
        A ``PIL.Image.Image`` in RGB mode.
    """
    # BUG FIX: the original opened the undefined name `A_` and returned the
    # undefined name `image` (NameError at call time); bind the downloaded
    # path and the decoded image to real locals instead.
    filepath = hf_hub_download(
        repo_id='''hf-internal-testing/fixtures_ade20k''', repo_type='''dataset''', filename='''ADE_val_00000001.jpg''' )
    image = Image.open(filepath ).convert('''RGB''' )
    return image
@require_torch
@require_vision
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    # Slow integration tests: run real UperNet checkpoints on the ADE20k
    # fixture image and compare a 3x3 logits corner against recorded values.
    # NOTE(review): both methods share the mangled name `__lowerCamelCase`
    # (the second shadows the first) and bodies read outer names (`model`,
    # `processor`, `inputs`, `outputs`, `expected_slice`) that are never
    # bound in this form — renaming artifacts.
    def __lowerCamelCase ( self ):
        # Swin-Tiny backbone checkpoint.
        lowercase : Any = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
        lowercase : Tuple = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(lowercase_ )
        lowercase : str = prepare_img()
        lowercase : Optional[int] = processor(images=lowercase_ , return_tensors='''pt''' ).to(lowercase_ )
        with torch.no_grad():
            lowercase : int = model(**lowercase_ )
        lowercase : Tuple = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , lowercase_ )
        lowercase : Tuple = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(lowercase_ )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , lowercase_ , atol=1E-4 ) )
    def __lowerCamelCase ( self ):
        # ConvNeXt-Tiny backbone checkpoint.
        lowercase : Any = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
        lowercase : List[str] = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(lowercase_ )
        lowercase : Dict = prepare_img()
        lowercase : Tuple = processor(images=lowercase_ , return_tensors='''pt''' ).to(lowercase_ )
        with torch.no_grad():
            lowercase : List[Any] = model(**lowercase_ )
        lowercase : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , lowercase_ )
        lowercase : Optional[Any] = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(lowercase_ )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , lowercase_ , atol=1E-4 ) )
| 337 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__UpperCamelCase : Dict = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = ['''CLIPFeatureExtractor''']
__UpperCamelCase : Optional[Any] = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 106 | 0 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
# Module-level logger for the conversion script.
__snake_case : str =logging.get_logger(__name__)
# Fairseq parameter-name fragment -> HF wav2vec2 attribute path; a '*' is
# replaced by the layer index during conversion.
# NOTE(review): the logger, this dict and the list below are all assigned to
# the same mangled name `__snake_case` (each shadowing the previous), while
# the functions below read `MAPPING`-style names — renaming artifact.
__snake_case : str ={
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'lm_head',
    'mask_emb': 'masked_spec_embed',
}
# HF attributes that live at the model's top level (not nested under the
# wav2vec2 encoder).
__snake_case : int =[
    'lm_head',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
]
def lowerCAmelCase__ ( lowerCamelCase_ : Dict ,lowerCamelCase_ : List[Any] ,lowerCamelCase_ : List[str] ,lowerCamelCase_ : Optional[int] ,lowerCamelCase_ : Union[str, Any]):
    '''Set one fairseq tensor onto the matching attribute of the HF model.

    NOTE(review): all five parameters are mangled to a single name, and the
    body reads `key`, `hf_pointer`, `weight_type`, `value` and `full_name`,
    which are never bound in this form — artifacts of automated renaming;
    restore the real parameter names before running.
    '''
    # Walk the dotted attribute path (e.g. "encoder.layers.0.attention").
    for attribute in key.split('''.'''):
        lowerCAmelCase__ : Dict = getattr(lowerCamelCase_ ,lowerCamelCase_)
    # Shape check before assignment: fail loudly on any mismatch between the
    # target slot and the source tensor.
    if weight_type is not None:
        lowerCAmelCase__ : Tuple = getattr(lowerCamelCase_ ,lowerCamelCase_).shape
    else:
        lowerCAmelCase__ : Tuple = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    # Dispatch on tensor kind: plain weight, weight-norm components (g/v),
    # bias, or a direct data assignment for anything else.
    if weight_type == "weight":
        lowerCAmelCase__ : List[str] = value
    elif weight_type == "weight_g":
        lowerCAmelCase__ : List[str] = value
    elif weight_type == "weight_v":
        lowerCAmelCase__ : List[str] = value
    elif weight_type == "bias":
        lowerCAmelCase__ : Any = value
    else:
        lowerCAmelCase__ : Any = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def lowerCAmelCase__ ( lowerCamelCase_ : Any ,lowerCamelCase_ : Optional[int]):
    '''Copy every tensor of the fairseq model into the HF wav2vec2 encoder.

    Returns the decoder projection weight when the fairseq checkpoint carries
    one (encoder/decoder dims differ), else None.

    NOTE(review): both parameters share one mangled name; the body reads
    `fairseq_model` / `hf_model` and helpers `load_conv_layer` /
    `set_recursively`, which are undefined in this mangled form.
    '''
    lowerCAmelCase__ : Dict = []
    lowerCAmelCase__ : Any = fairseq_model.state_dict()
    lowerCAmelCase__ : Tuple = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    lowerCAmelCase__ : Any = None
    for name, value in fairseq_dict.items():
        lowerCAmelCase__ : List[str] = False
        if "conv_layers" in name:
            # Feature-extractor convolutions are handled by a dedicated loader.
            load_conv_layer(
                lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,hf_model.config.feat_extract_norm == '''group''' ,)
            lowerCAmelCase__ : Optional[int] = True
        elif name.split('''.''')[0] == "proj":
            # Encoder->decoder projection: return it to the caller instead of
            # storing it on the wav2vec2 model.
            lowerCAmelCase__ : Any = fairseq_model.proj
            lowerCAmelCase__ : Tuple = True
        else:
            # Map via the fragment table; '*' stands for the layer index.
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('''w2v_model.''')[-1] == name.split('''.''')[0]:
                    lowerCAmelCase__ : List[str] = True
                    if "*" in mapped_key:
                        lowerCAmelCase__ : Union[str, Any] = name.split(lowerCamelCase_)[0].split('''.''')[-2]
                        lowerCAmelCase__ : List[str] = mapped_key.replace('''*''' ,lowerCamelCase_)
                    if "weight_g" in name:
                        lowerCAmelCase__ : Any = '''weight_g'''
                    elif "weight_v" in name:
                        lowerCAmelCase__ : int = '''weight_v'''
                    elif "bias" in name:
                        lowerCAmelCase__ : int = '''bias'''
                    elif "weight" in name:
                        lowerCAmelCase__ : Any = '''weight'''
                    else:
                        lowerCAmelCase__ : Union[str, Any] = None
                    set_recursively(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_)
                continue
        if not is_used:
            unused_weights.append(lowerCamelCase_)
    logger.warning(f"""Unused weights: {unused_weights}""")
    return proj_weight
def lowerCAmelCase__ ( lowerCamelCase_ : int ,lowerCamelCase_ : Union[str, Any] ,lowerCamelCase_ : Any ,lowerCamelCase_ : Tuple ,lowerCamelCase_ : Optional[Any]):
    '''Load one feature-extractor conv-layer tensor (conv weight/bias or its
    layer norm) into the HF model, with shape assertions.

    NOTE(review): the five parameters share one mangled name; the body reads
    `full_name`, `name`, `value`, `feature_extractor`, `use_group_norm` and
    `unused_weights`, undefined in this form — renaming artifacts.
    '''
    # Fairseq names look like "conv_layers.<layer>.<type>....": type 0 is the
    # convolution itself, type 2 a layer norm.
    lowerCAmelCase__ : List[str] = full_name.split('''conv_layers.''')[-1]
    lowerCAmelCase__ : List[Any] = name.split('''.''')
    lowerCAmelCase__ : Union[str, Any] = int(items[0])
    lowerCAmelCase__ : List[Any] = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            lowerCAmelCase__ : List[Any] = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            lowerCAmelCase__ : Union[str, Any] = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # Layer norms exist per layer without group norm, or only on layer 0
        # when group norm is used.
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            lowerCAmelCase__ : Optional[int] = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            lowerCAmelCase__ : Optional[int] = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        # Anything unrecognized is recorded so the caller can warn about it.
        unused_weights.append(lowerCamelCase_)
def lowerCAmelCase__ ( lowerCamelCase_ : Optional[int]):
    '''Build a bias-free linear layer that shares its weights with the given
    embedding (weight tying for the decoder output projection).

    Args:
        lowerCamelCase_: an ``nn.Embedding``-like module whose ``weight`` is
            reused as the linear layer's weight.

    Returns:
        ``nn.Linear`` with ``bias=None`` and ``weight.data`` taken directly
        from the embedding.
    '''
    # BUG FIX: the original passed the embedding module itself as the Linear
    # dimensions and as `bias` (TypeError at call time); unpack the weight
    # shape and disable the bias instead.
    vocab_size, emb_size = lowerCamelCase_.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = lowerCamelCase_.weight.data
    return lin_layer
def lowerCAmelCase__ ( lowerCamelCase_ : List[str]):
    '''Build an HF-style vocabulary dict from a fairseq dictionary file.

    Each line of the file is "<token> <count>"; tokens receive consecutive
    ids starting at 4, after the four special tokens.
    '''
    with open(lowerCamelCase_ ,'''r''' ,encoding='''utf-8''') as dict_file:
        raw_lines = dict_file.readlines()
    # Keep only the token (first whitespace-separated field) of each line.
    tokens = [raw.split(''' ''')[0] for raw in raw_lines]
    vocab_dict = {
        '''<s>''': 0,
        '''<pad>''': 1,
        '''</s>''': 2,
        '''<unk>''': 3,
    }
    # Ids 0-3 are reserved above, so file tokens start at id 4.
    for offset, token in enumerate(tokens):
        vocab_dict[token] = offset + 4
    return vocab_dict
@torch.no_grad()
def lowerCAmelCase__ ( lowerCamelCase_ : List[Any] ,lowerCamelCase_ : List[Any] ,lowerCamelCase_ : int ,lowerCamelCase_ : Optional[int] ,lowerCamelCase_ : int ,lowerCamelCase_ : Any ,lowerCamelCase_ : Optional[Any] ,):
    '''Convert a fairseq wav2vec2 + speech_to_text_2 checkpoint to an HF
    SpeechEncoderDecoderModel, writing model, tokenizer, vocab and feature
    extractor to the dump folder.

    NOTE(review): the parameters and most locals collapsed to shared mangled
    names; the body reads `checkpoint_path`, `dict_path`, `hf_wavavec`,
    `hf_decoder`, `projection_layer`, etc., which are never bound in this
    form — restore real names before use.
    '''
    # Build encoder/decoder configs from the reference HF checkpoints.
    lowerCAmelCase__ : Optional[Any] = WavaVecaConfig.from_pretrained(lowerCamelCase_)
    lowerCAmelCase__ : Any = SpeechaTextaConfig.from_pretrained(
        lowerCamelCase_ ,vocab_size=lowerCamelCase_ ,decoder_layers=lowerCamelCase_ ,do_stable_layer_norm=lowerCamelCase_)
    lowerCAmelCase__ : List[Any] = WavaVecaFeatureExtractor(
        feature_size=1 ,sampling_rate=16000 ,padding_value=0 ,do_normalize=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,)
    # Load the fairseq ensemble; `data` must point at the dict's directory.
    lowerCAmelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''')[:-1])})
    lowerCAmelCase__ : List[str] = model[0].eval()
    # set weights for wav2vec2 encoder
    lowerCAmelCase__ : Any = WavaVecaModel(lowerCamelCase_)
    lowerCAmelCase__ : List[str] = recursively_load_weights_wavaveca(model.encoder ,lowerCamelCase_)
    lowerCAmelCase__ : Any = SpeechaTextaForCausalLM(lowerCamelCase_)
    lowerCAmelCase__ : Dict = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() ,strict=lowerCamelCase_)
    # set output linear layer
    unexpected_keys.remove('''embed_out''')
    lowerCAmelCase__ : Dict = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""")
    logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""")
    lowerCAmelCase__ : Tuple = SpeechEncoderDecoderModel(encoder=lowerCamelCase_ ,decoder=lowerCamelCase_)
    lowerCAmelCase__ : List[str] = False
    # add projection layer
    lowerCAmelCase__ : List[Any] = nn.Parameter(projection_layer.weight)
    lowerCAmelCase__ : Union[str, Any] = nn.Parameter(projection_layer.bias)
    # Write the vocabulary and the tokenizer built from it.
    lowerCAmelCase__ : Any = create_vocab_dict(lowerCamelCase_)
    with open(os.path.join(lowerCamelCase_ ,'''vocab.json''') ,'''w''') as fp:
        json.dump(lowerCamelCase_ ,lowerCamelCase_)
    lowerCAmelCase__ : Union[str, Any] = SpeechaTextaTokenizer(os.path.join(lowerCamelCase_ ,'''vocab.json'''))
    tokenizer.save_pretrained(lowerCamelCase_)
    # Sync special-token ids and model types into the combined config.
    lowerCAmelCase__ : Optional[Any] = hf_wavavec.config.to_dict()
    lowerCAmelCase__ : List[str] = tokenizer.pad_token_id
    lowerCAmelCase__ : Dict = tokenizer.bos_token_id
    lowerCAmelCase__ : Optional[Any] = tokenizer.eos_token_id
    lowerCAmelCase__ : Union[str, Any] = '''speech_to_text_2'''
    lowerCAmelCase__ : Optional[Any] = '''wav2vec2'''
    lowerCAmelCase__ : str = SpeechEncoderDecoderConfig.from_dict(lowerCamelCase_)
    hf_wavavec.save_pretrained(lowerCamelCase_)
    feature_extractor.save_pretrained(lowerCamelCase_)
if __name__ == "__main__":
__snake_case : List[str] =argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_0_2_2_4, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
__snake_case : str =parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 360 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def lowerCAmelCase__ ( lowerCamelCase_ : ndarray):
    '''Return the squared Euclidean norm of the vector, i.e. v . v.'''
    squared_norm = np.dot(lowerCamelCase_ ,lowerCamelCase_)
    return squared_norm
class lowerCamelCase__ :
    '''Support-vector classifier solved via Wolfe's dual with scipy.optimize.

    NOTE(review): the three keyword-only __init__ parameters are mangled to
    the single name `__lowerCamelCase`, attribute writes collapsed to bare
    `lowerCAmelCase__` locals, and the kernel/fit methods all share one name
    (later defs shadow earlier ones). Bodies read names never bound in this
    form (`regularization`, `kernel`, `observations`, `self.optimum`, ...).
    Restore the original identifiers before use.
    '''
    def __init__(self ,*,
    __lowerCamelCase = np.inf ,__lowerCamelCase = "linear" ,__lowerCamelCase = 0.0 ,) -> None:
        """Store regularization/gamma and select the kernel function."""
        lowerCAmelCase__ : Any = regularization
        lowerCAmelCase__ : str = gamma
        if kernel == "linear":
            lowerCAmelCase__ : Dict = self.__linear
        elif kernel == "rbf":
            # RBF requires a strictly positive numeric gamma.
            if self.gamma == 0:
                raise ValueError('''rbf kernel requires gamma''' )
            if not isinstance(self.gamma ,(float, int) ):
                raise ValueError('''gamma must be float or int''' )
            if not self.gamma > 0:
                raise ValueError('''gamma must be > 0''' )
            lowerCAmelCase__ : Optional[Any] = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            lowerCAmelCase__ : List[str] = f"""Unknown kernel: {kernel}"""
            raise ValueError(__lowerCamelCase )
    def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ) -> float:
        """Linear kernel: plain dot product of the two vectors."""
        return np.dot(__lowerCamelCase ,__lowerCamelCase )
    def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ) -> float:
        """RBF kernel: exp(-gamma * ||v1 - v2||^2)."""
        return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
    def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ) -> None:
        """Fit the classifier by maximizing Wolfe's dual, then estimate the
        offset as the mean signed distance to the training points."""
        lowerCAmelCase__ : str = observations
        lowerCAmelCase__ : Optional[int] = classes
        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        ((lowerCAmelCase__) , ) : List[str] = np.shape(__lowerCamelCase )
        def to_minimize(__lowerCamelCase ) -> float:
            # Negated dual objective (scipy minimizes):
            # 1/2 * sum_nm(ln lm yn ym K(xn, xm)) - sum_n(ln)
            lowerCAmelCase__ : List[str] = 0
            ((lowerCAmelCase__) , ) : str = np.shape(__lowerCamelCase )
            for i in range(__lowerCamelCase ):
                for j in range(__lowerCamelCase ):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i] ,observations[j] )
                    )
            return 1 / 2 * s - sum(__lowerCamelCase )
        # Equality constraint sum_n(ln*yn) = 0 and box constraint 0 <= ln <= C.
        lowerCAmelCase__ : List[str] = LinearConstraint(__lowerCamelCase ,0 ,0 )
        lowerCAmelCase__ : List[str] = Bounds(0 ,self.regularization )
        lowerCAmelCase__ : int = minimize(
            __lowerCamelCase ,np.ones(__lowerCamelCase ) ,bounds=__lowerCamelCase ,constraints=[ly_contraint] ).x
        lowerCAmelCase__ : List[Any] = l_star
        # calculating mean offset of separation plane to points
        lowerCAmelCase__ : Optional[Any] = 0
        for i in range(__lowerCamelCase ):
            for j in range(__lowerCamelCase ):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i] ,observations[j] )
        lowerCAmelCase__ : Dict = s / n
    def lowerCAmelCase__ (self ,__lowerCamelCase ) -> int:
        """Classify one observation: sign of the kernelized decision value."""
        lowerCAmelCase__ : str = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] ,__lowerCamelCase )
            for n in range(len(self.classes ) ) )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 94 | 0 |
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
_SCREAMING_SNAKE_CASE = {
# 1536-bit
5: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 2048-bit
1_4: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AACAA68FFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 3072-bit
1_5: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 4096-bit
1_6: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"""
+ """FFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 6144-bit
1_7: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"""
+ """8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"""
+ """302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"""
+ """A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"""
+ """49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"""
+ """FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"""
+ """180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"""
+ """3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"""
+ """04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"""
+ """B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"""
+ """1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"""
+ """E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"""
+ """99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"""
+ """04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"""
+ """233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"""
+ """D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"""
+ """AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"""
+ """DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"""
+ """2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"""
+ """F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"""
+ """BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"""
+ """B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"""
+ """387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"""
+ """6DCC4024FFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 8192-bit
1_8: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"""
+ """F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"""
+ """179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"""
+ """DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"""
+ """5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"""
+ """D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"""
+ """23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"""
+ """06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"""
+ """DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"""
+ """12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"""
+ """38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"""
+ """741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"""
+ """3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"""
+ """22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"""
+ """4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"""
+ """062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"""
+ """4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"""
+ """B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"""
+ """4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"""
+ """9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"""
+ """60C980DD98EDD3DFFFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
}
class SCREAMING_SNAKE_CASE_ :
    '''Diffie-Hellman key exchange over the RFC 3526 MODP groups.

    NOTE(review): every method besides __init__ is mangled to the same name
    `lowerCamelCase_`, so only the last definition survives in the class
    namespace; several bodies also read names that are never bound
    (`group`, `key`, `remote_public_key_str`, `prime`), and attribute writes
    collapsed to the bare local `UpperCamelCase`. The hash helper `shaaaa`
    is presumably a mangled `sha256` import. Restore real identifiers
    before use.
    '''
    def __init__( self : List[Any] , lowerCamelCase_ : int = 14 ):
        """Select a MODP group (default: 2048-bit group 14) and draw a
        random 256-bit private key from os.urandom."""
        if group not in primes:
            raise ValueError("""Unsupported Group""" )
        UpperCamelCase = primes[group]["""prime"""]
        UpperCamelCase = primes[group]["""generator"""]
        UpperCamelCase = int(hexlify(urandom(32 ) ) , base=16 )
    def lowerCamelCase_ ( self : List[Any] ):
        """Return the private key as a hex string (no 0x prefix)."""
        return hex(self.__private_key )[2:]
    def lowerCamelCase_ ( self : Optional[Any] ):
        """Return the public key g^private mod p as a hex string."""
        UpperCamelCase = pow(self.generator , self.__private_key , self.prime )
        return hex(lowerCamelCase_ )[2:]
    def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : int ):
        """Validate a peer public key: in range [2, p-2] and a quadratic
        residue (Legendre symbol check via Euler's criterion)."""
        return (
            2 <= key <= self.prime - 2
            and pow(lowerCamelCase_ , (self.prime - 1) // 2 , self.prime ) == 1
        )
    def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : str ):
        """Derive the shared secret from the peer's hex public key and
        return its hash hex digest."""
        UpperCamelCase = int(lowerCamelCase_ , base=16 )
        if not self.is_valid_public_key(lowerCamelCase_ ):
            raise ValueError("""Invalid public key""" )
        UpperCamelCase = pow(lowerCamelCase_ , self.__private_key , self.prime )
        return shaaaa(str(lowerCamelCase_ ).encode() ).hexdigest()
    @staticmethod
    def lowerCamelCase_ ( lowerCamelCase_ : int , lowerCamelCase_ : int ):
        """Static peer-key validity check.

        NOTE(review): `pow` is called here with the key as both base and
        modulus (always 0, so the check can never pass); the intended call
        is pow(key, (prime - 1) // 2, prime) == 1.
        """
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(lowerCamelCase_ , (prime - 1) // 2 , lowerCamelCase_ ) == 1
        )
    @staticmethod
    def lowerCamelCase_ ( lowerCamelCase_ : str , lowerCamelCase_ : str , lowerCamelCase_ : int = 14 ):
        """Static shared-secret derivation from two hex-encoded keys
        (local private, remote public) in the given MODP group."""
        UpperCamelCase = int(lowerCamelCase_ , base=16 )
        UpperCamelCase = int(lowerCamelCase_ , base=16 )
        UpperCamelCase = primes[group]["""prime"""]
        if not DiffieHellman.is_valid_public_key_static(lowerCamelCase_ , lowerCamelCase_ ):
            raise ValueError("""Invalid public key""" )
        UpperCamelCase = pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
        return shaaaa(str(lowerCamelCase_ ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 343 | import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE_ :
    """Helper that builds tiny Bit configs/inputs and runs shape checks for the tests below.

    NOTE(review): this source has been identifier-mangled — every ``__init__``
    parameter is named ``lowerCamelCase_`` (a SyntaxError when duplicated) and
    every assignment target is ``UpperCamelCase`` while the right-hand sides
    still use the original names (``parent``, ``batch_size``, ...).  The class
    cannot run as written; the original names need to be restored.
    """

    def __init__( self : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any]=3 , lowerCamelCase_ : Dict=32 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : int=10 , lowerCamelCase_ : Optional[int]=[8, 16, 32, 64] , lowerCamelCase_ : List[str]=[1, 1, 2, 1] , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : List[Any]="relu" , lowerCamelCase_ : List[Any]=3 , lowerCamelCase_ : Dict=None , lowerCamelCase_ : List[Any]=["stage2", "stage3", "stage4"] , lowerCamelCase_ : Optional[Any]=[2, 3, 4] , lowerCamelCase_ : List[Any]=1 , ):
        """Record the hyper-parameters used to build the tiny test model."""
        UpperCamelCase = parent
        UpperCamelCase = batch_size
        UpperCamelCase = image_size
        UpperCamelCase = num_channels
        UpperCamelCase = embeddings_size
        UpperCamelCase = hidden_sizes
        UpperCamelCase = depths
        UpperCamelCase = is_training
        UpperCamelCase = use_labels
        UpperCamelCase = hidden_act
        UpperCamelCase = num_labels
        UpperCamelCase = scope
        # The number of stages is derived from the depths list.
        UpperCamelCase = len(lowerCamelCase_ )
        UpperCamelCase = out_features
        UpperCamelCase = out_indices
        UpperCamelCase = num_groups

    def lowerCamelCase_ ( self : Any ):
        """Create a config plus random pixel values (and labels when ``use_labels``)."""
        UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCamelCase = None
        if self.use_labels:
            UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
        UpperCamelCase = self.get_config()
        return config, pixel_values, labels

    def lowerCamelCase_ ( self : Optional[int] ):
        """Build a ``BitConfig`` from the stored hyper-parameters."""
        return BitConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )

    def lowerCamelCase_ ( self : int , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] ):
        """Run ``BitModel`` and check the last hidden state shape (stride-32 feature map)."""
        UpperCamelCase = BitModel(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCamelCase = model(lowerCamelCase_ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str ):
        """Run ``BitForImageClassification`` and check the logits shape."""
        UpperCamelCase = self.num_labels
        UpperCamelCase = BitForImageClassification(lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCamelCase = model(lowerCamelCase_ , labels=lowerCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def lowerCamelCase_ ( self : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int ):
        """Run ``BitBackbone`` with and without explicit out_features; check maps/channels."""
        UpperCamelCase = BitBackbone(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCamelCase = model(lowerCamelCase_ )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        UpperCamelCase = None
        UpperCamelCase = BitBackbone(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCamelCase = model(lowerCamelCase_ )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    def lowerCamelCase_ ( self : List[str] ):
        """Return ``(config, inputs_dict)`` for the shared model-test mixins."""
        UpperCamelCase = self.prepare_config_and_inputs()
        UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
        UpperCamelCase = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
    """Model/pipeline test suite for the Bit family (BitModel, BitForImageClassification, BitBackbone).

    NOTE(review): identifier mangling collapsed all class-level attributes
    (model/pipeline mappings and the various ``test_*`` switches) onto the
    single name ``__lowerCAmelCase``, so only the last assignment survives,
    and several method bodies reference the undefined ``lowerCamelCase_``.
    The original distinct names need to be restored for this suite to run.
    """

    __lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    __lowerCAmelCase = (
        {"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification}
        if is_torch_available()
        else {}
    )
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False

    def lowerCamelCase_ ( self : Any ):
        """Set up the shared Bit model tester and the BitConfig tester."""
        UpperCamelCase = BitModelTester(self )
        UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ )

    def lowerCamelCase_ ( self : Optional[int] ):
        """Run the common config serialization/initialization checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Intentional no-op (common-properties hook not applicable to Bit)."""
        return

    @unittest.skip(reason="""Bit does not output attentions""" )
    def lowerCamelCase_ ( self : int ):
        """Skipped: Bit is a convnet and produces no attention maps."""
        pass

    @unittest.skip(reason="""Bit does not use inputs_embeds""" )
    def lowerCamelCase_ ( self : List[str] ):
        """Skipped: Bit consumes pixel values, not input embeddings."""
        pass

    @unittest.skip(reason="""Bit does not support input and output embeddings""" )
    def lowerCamelCase_ ( self : Optional[int] ):
        """Skipped: Bit has no token embedding tables to test."""
        pass

    def lowerCamelCase_ ( self : Tuple ):
        """Check that every model's forward signature starts with ``pixel_values``."""
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase = model_class(lowerCamelCase_ )
            UpperCamelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCamelCase = [*signature.parameters.keys()]
            UpperCamelCase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , lowerCamelCase_ )

    def lowerCamelCase_ ( self : Tuple ):
        """Smoke-test the base model via the shared tester."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase_ )

    def lowerCamelCase_ ( self : List[str] ):
        """Smoke-test the backbone via the shared tester."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*lowerCamelCase_ )

    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Check that norm-layer weights/biases initialize to 1/0.

        NOTE(review): ``nn.BatchNormad`` looks like a mangled ``nn.BatchNorm2d``.
        """
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase = model_class(config=lowerCamelCase_ )
            for name, module in model.named_modules():
                if isinstance(lowerCamelCase_ , (nn.BatchNormad, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )

    def lowerCamelCase_ ( self : int ):
        """Check hidden-state outputs for both layer types, via kwarg and via config."""
        def check_hidden_states_output(lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any ):
            # Run one forward pass and verify count and spatial size of hidden states.
            UpperCamelCase = model_class(lowerCamelCase_ )
            model.to(lowerCamelCase_ )
            model.eval()
            with torch.no_grad():
                UpperCamelCase = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
            UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            UpperCamelCase = self.model_tester.num_stages
            self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 )
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase = ["""preactivation""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                UpperCamelCase = layer_type
                UpperCamelCase = True
                check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                UpperCamelCase = True
                check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )

    @unittest.skip(reason="""Bit does not use feedforward chunking""" )
    def lowerCamelCase_ ( self : List[str] ):
        """Skipped: feedforward chunking does not apply to convnets."""
        pass

    def lowerCamelCase_ ( self : str ):
        """Smoke-test the image-classification head via the shared tester."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )

    @slow
    def lowerCamelCase_ ( self : int ):
        """Check that the first pretrained Bit checkpoint loads from the hub."""
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase = BitModel.from_pretrained(lowerCamelCase_ )
            self.assertIsNotNone(lowerCamelCase_ )
def lowercase( ) -> Any:
    """Load the standard COCO cats fixture image used by the slow integration tests.

    The mangled original assigned the opened image to ``UpperCamelCase`` and
    then returned the undefined name ``image`` (a NameError); the assignment
    target is restored here.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Slow integration test: run a pretrained Bit classifier on a real image.

    NOTE(review): like the rest of this file, the bodies assign results to the
    mangled name ``UpperCamelCase`` and read back the original names
    (``model``, ``outputs``, ...); restore the names to make this runnable.
    """

    @cached_property
    def lowerCamelCase_ ( self : Optional[Any] ):
        """Image processor for the first pretrained Bit checkpoint (None without vision deps)."""
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
        )

    @slow
    def lowerCamelCase_ ( self : Optional[int] ):
        """End-to-end forward pass on the COCO image; checks logits shape and first three values."""
        UpperCamelCase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase_ )
        UpperCamelCase = self.default_image_processor
        UpperCamelCase = prepare_img()
        UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""pt""" ).to(lowerCamelCase_ )
        # forward pass
        with torch.no_grad():
            UpperCamelCase = model(**lowerCamelCase_ )
        # verify the logits
        UpperCamelCase = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
        UpperCamelCase = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(lowerCamelCase_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ):
    """Backbone API tests for BitBackbone (the checks themselves come from the mixin base).

    NOTE(review): the three class attributes were mangled onto the single name
    ``__lowerCAmelCase`` (originally the backbone class tuple, the config
    class, and a ``has_attentions``-style flag), so only the last survives.
    """

    __lowerCAmelCase = (BitBackbone,) if is_torch_available() else ()
    __lowerCAmelCase = BitConfig
    __lowerCAmelCase = False

    def lowerCamelCase_ ( self : Any ):
        """Set up the shared Bit model tester."""
        UpperCamelCase = BitModelTester(self )
| 343 | 1 |
import random
from typing import Any
def _SCREAMING_SNAKE_CASE ( lowercase : list ):
'''simple docstring'''
for _ in range(len(lowercase ) ):
lowerCamelCase_ = random.randint(0 , len(lowercase ) - 1 )
lowerCamelCase_ = random.randint(0 , len(lowercase ) - 1 )
lowerCamelCase_ , lowerCamelCase_ = data[b], data[a]
return data
if __name__ == "__main__":
    # Demo: shuffle a list of ints and a list of strings and print both.
    # NOTE(review): identifier mangling broke this demo — both lists are
    # assigned to ``lowerCamelCase`` (the second overwrites the first) while
    # the prints reference the original names ``integers``/``strings`` and the
    # original function name ``fisher_yates_shuffle``; restore the names.
    lowerCamelCase : Any = [0, 1, 2, 3, 4, 5, 6, 7]
    lowerCamelCase : int = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 208 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class A( UpperCamelCase ):
    '''Output container for the prior transformer: the predicted CLIP image embedding.

    NOTE(review): the single field's name and annotation were mangled to
    ``UpperCamelCase = 42``; the forward pass below constructs this output as
    ``PriorTransformerOutput(predicted_image_embedding=...)``, so that is the
    field to restore (as a ``torch.FloatTensor``-annotated dataclass field).
    '''
    UpperCamelCase = 42
class A( UpperCamelCase , UpperCamelCase ):
    '''Prior transformer that maps a (timestep, noisy embedding, text-embedding)
    triple to a predicted CLIP image embedding, as used by unCLIP-style pipelines.

    NOTE(review): identifiers are mangled throughout — every local is assigned
    to ``lowerCamelCase_`` and most call arguments appear as ``A_`` while the
    reads use the original names (``num_attention_heads``, ``timesteps``, ...).
    The structure below documents the intended flow; the names must be
    restored before this can run.
    '''

    @register_to_config
    def __init__( self : Tuple , A_ : int = 32 , A_ : int = 64 , A_ : int = 20 , A_ : int = 768 , A_ : Optional[Any]=77 , A_ : Optional[int]=4 , A_ : float = 0.0 , A_ : str = "silu" , A_ : Optional[str] = None , A_ : Optional[str] = None , A_ : Optional[str] = "linear" , A_ : Optional[str] = "prd" , A_ : Optional[int] = None , A_ : Optional[int] = None , A_ : Optional[int] = None , ) -> List[Any]:
        """Build projections, learned positional/PRD embeddings, transformer blocks and the causal mask."""
        super().__init__()
        lowerCamelCase_ = num_attention_heads
        lowerCamelCase_ = attention_head_dim
        # Transformer width is heads * head_dim.
        lowerCamelCase_ = num_attention_heads * attention_head_dim
        lowerCamelCase_ = additional_embeddings
        # Optional overrides fall back to the inner/embedding dimensions.
        lowerCamelCase_ = time_embed_dim or inner_dim
        lowerCamelCase_ = embedding_proj_dim or embedding_dim
        lowerCamelCase_ = clip_embed_dim or embedding_dim
        # Sinusoidal timestep features followed by an MLP embedding.
        lowerCamelCase_ = Timesteps(A_ , A_ , 0 )
        lowerCamelCase_ = TimestepEmbedding(A_ , A_ , out_dim=A_ , act_fn=A_ )
        lowerCamelCase_ = nn.Linear(A_ , A_ )
        if embedding_proj_norm_type is None:
            lowerCamelCase_ = None
        elif embedding_proj_norm_type == "layer":
            lowerCamelCase_ = nn.LayerNorm(A_ )
        else:
            raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
        lowerCamelCase_ = nn.Linear(A_ , A_ )
        if encoder_hid_proj_type is None:
            lowerCamelCase_ = None
        elif encoder_hid_proj_type == "linear":
            lowerCamelCase_ = nn.Linear(A_ , A_ )
        else:
            raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
        # Learned positional embedding over the full token sequence.
        lowerCamelCase_ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , A_ ) )
        if added_emb_type == "prd":
            # Extra learned "prd" token appended to the sequence.
            lowerCamelCase_ = nn.Parameter(torch.zeros(1 , 1 , A_ ) )
        elif added_emb_type is None:
            lowerCamelCase_ = None
        else:
            raise ValueError(
                f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
        lowerCamelCase_ = nn.ModuleList(
            [
                BasicTransformerBlock(
                    A_ , A_ , A_ , dropout=A_ , activation_fn='gelu' , attention_bias=A_ , )
                for d in range(A_ )
            ] )
        if norm_in_type == "layer":
            lowerCamelCase_ = nn.LayerNorm(A_ )
        elif norm_in_type is None:
            lowerCamelCase_ = None
        else:
            raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
        lowerCamelCase_ = nn.LayerNorm(A_ )
        lowerCamelCase_ = nn.Linear(A_ , A_ )
        # Upper-triangular additive mask (-10000 above the diagonal) for causal attention.
        lowerCamelCase_ = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
        causal_attention_mask.triu_(1 )
        lowerCamelCase_ = causal_attention_mask[None, ...]
        self.register_buffer('causal_attention_mask' , A_ , persistent=A_ )
        # Learned statistics used by post_process_latents to un-normalize outputs.
        lowerCamelCase_ = nn.Parameter(torch.zeros(1 , A_ ) )
        lowerCamelCase_ = nn.Parameter(torch.zeros(1 , A_ ) )

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def a__ ( self : str ) -> Dict[str, AttentionProcessor]:
        """Return all attention processors of this model, keyed by module path."""
        lowerCamelCase_ = {}

        def fn_recursive_add_processors(A_ : str , A_ : torch.nn.Module , A_ : Dict[str, AttentionProcessor] ):
            # Record this module's processor (if any), then recurse into children.
            if hasattr(A_ , 'set_processor' ):
                lowerCamelCase_ = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"""{name}.{sub_name}""" , A_ , A_ )
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(A_ , A_ , A_ )
        return processors

    def a__ ( self : List[Any] , A_ : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Dict:
        """Set the attention processor(s): a single processor for all layers, or a per-path dict."""
        lowerCamelCase_ = len(self.attn_processors.keys() )
        if isinstance(A_ , A_ ) and len(A_ ) != count:
            raise ValueError(
                f"""A dict of processors was passed, but the number of processors {len(A_ )} does not match the"""
                f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )

        def fn_recursive_attn_processor(A_ : str , A_ : torch.nn.Module , A_ : Union[str, Any] ):
            # Apply either the shared processor or the path-specific one, then recurse.
            if hasattr(A_ , 'set_processor' ):
                if not isinstance(A_ , A_ ):
                    module.set_processor(A_ )
                else:
                    module.set_processor(processor.pop(f"""{name}.processor""" ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"""{name}.{sub_name}""" , A_ , A_ )

        for name, module in self.named_children():
            fn_recursive_attn_processor(A_ , A_ , A_ )

    def a__ ( self : List[Any] ) -> List[Any]:
        """Reset every attention layer to the default AttnProcessor."""
        self.set_attn_processor(AttnProcessor() )

    def a__ ( self : Dict , A_ : List[Any] , A_ : Union[torch.Tensor, float, int] , A_ : torch.FloatTensor , A_ : Optional[torch.FloatTensor] = None , A_ : Optional[torch.BoolTensor] = None , A_ : bool = True , ) -> str:
        """Forward pass: assemble the token sequence (text tokens, projected embedding,
        time embedding, hidden states, optional prd token), run the transformer
        stack under the causal mask, and read off the predicted image embedding.
        """
        lowerCamelCase_ = hidden_states.shape[0]
        lowerCamelCase_ = timestep
        # Normalize the timestep argument to a 1-D tensor on the right device.
        if not torch.is_tensor(A_ ):
            lowerCamelCase_ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
        elif torch.is_tensor(A_ ) and len(timesteps.shape ) == 0:
            lowerCamelCase_ = timesteps[None].to(hidden_states.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        lowerCamelCase_ = timesteps * torch.ones(A_ , dtype=timesteps.dtype , device=timesteps.device )
        lowerCamelCase_ = self.time_proj(A_ )
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        lowerCamelCase_ = timesteps_projected.to(dtype=self.dtype )
        lowerCamelCase_ = self.time_embedding(A_ )
        if self.embedding_proj_norm is not None:
            lowerCamelCase_ = self.embedding_proj_norm(A_ )
        lowerCamelCase_ = self.embedding_proj(A_ )
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            lowerCamelCase_ = self.encoder_hidden_states_proj(A_ )
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
        lowerCamelCase_ = self.proj_in(A_ )
        lowerCamelCase_ = self.positional_embedding.to(hidden_states.dtype )
        # Collect the extra tokens that precede the hidden states in the sequence.
        lowerCamelCase_ = []
        lowerCamelCase_ = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(A_ )
            additional_embeddings_len += encoder_hidden_states.shape[1]
        if len(proj_embeddings.shape ) == 2:
            lowerCamelCase_ = proj_embeddings[:, None, :]
        if len(hidden_states.shape ) == 2:
            lowerCamelCase_ = hidden_states[:, None, :]
        lowerCamelCase_ = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            lowerCamelCase_ = self.prd_embedding.to(hidden_states.dtype ).expand(A_ , -1 , -1 )
            additional_embeds.append(A_ )
        lowerCamelCase_ = torch.cat(
            A_ , dim=1 , )
        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        lowerCamelCase_ = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            lowerCamelCase_ = F.pad(
                A_ , (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ) , value=0.0 , )
        lowerCamelCase_ = hidden_states + positional_embeddings
        if attention_mask is not None:
            # Convert the boolean mask into an additive mask, pad for the extra
            # tokens, merge with the causal mask and repeat per attention head.
            lowerCamelCase_ = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
            lowerCamelCase_ = F.pad(A_ , (0, self.additional_embeddings) , value=0.0 )
            lowerCamelCase_ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
            lowerCamelCase_ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
        if self.norm_in is not None:
            lowerCamelCase_ = self.norm_in(A_ )
        for block in self.transformer_blocks:
            lowerCamelCase_ = block(A_ , attention_mask=A_ )
        lowerCamelCase_ = self.norm_out(A_ )
        # Read the prediction from the prd token if present, otherwise from the tail tokens.
        if self.prd_embedding is not None:
            lowerCamelCase_ = hidden_states[:, -1]
        else:
            lowerCamelCase_ = hidden_states[:, additional_embeddings_len:]
        lowerCamelCase_ = self.proj_to_clip_embeddings(A_ )
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=A_ )

    def a__ ( self : Tuple , A_ : List[Any] ) -> Any:
        """Un-normalize predicted latents using the learned CLIP mean/std parameters."""
        lowerCamelCase_ = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| 208 | 1 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
# Type aliases for readability: a dataclass *instance* and a dataclass *type*.
# NOTE(review): mangling assigned both to ``__a`` (the second overwrites the
# first); originally these were two distinct names (e.g. DataClass / DataClassType).
__a = NewType('DataClass', Any)
__a = NewType('DataClassType', Any)
def a ( snake_case__: List[str] ):
'''simple docstring'''
if isinstance(a_ , a_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' )
def a ( snake_case__: list ):
    '''
    Build an argparse ``type=`` callable that maps a choice's *string* form back
    to the original choice value.

    Unknown strings are returned unchanged so argparse's own ``choices``
    validation can report them.  The mangled original's lambda referenced the
    undefined name ``a_``; the lookup argument is restored here.
    '''
    lowercase_ = {str(choice ): choice for choice in snake_case__}
    return lambda arg: lowercase_.get(arg , arg )
def a ( *,
    aliases = None , help = None , default = dataclasses.MISSING , default_factory = dataclasses.MISSING , metadata = None , **kwargs , ):
    '''
    Convenience wrapper around ``dataclasses.field`` that stores argparse
    ``aliases`` and ``help`` text in the field's metadata.

    NOTE(review): the mangled original declared every keyword parameter as
    ``snake_case__`` (a SyntaxError); the names here are restored from the
    body's usage (``aliases``/``help``/``default``/``default_factory``/``metadata``).

    :param aliases: extra command-line flag name(s) for this field
    :param help: help text shown by argparse
    :param default: static default value (mutually exclusive with default_factory)
    :param default_factory: factory for mutable defaults
    :param metadata: extra metadata dict; aliases/help are merged into it
    '''
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata['aliases'] = aliases
    if help is not None:
        metadata['help'] = help
    return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **kwargs )
class lowercase__( _UpperCamelCase ):
    """ArgumentParser subclass that auto-generates CLI arguments from dataclass fields
    and parses back into dataclass instances (HfArgumentParser-style).

    NOTE(review): identifiers are mangled throughout — several methods declare
    duplicate ``SCREAMING_SNAKE_CASE_`` parameters (a SyntaxError), locals are
    assigned to ``lowercase_`` while reads use the original names, and call
    sites reference ``string_to_bool`` / ``make_choice_type_function`` which
    exist in this file only under the mangled name ``a``.  Names must be
    restored before this class can run.
    """

    # NOTE(review): mangled class attribute; __init__ reads ``self.dataclass_types``.
    a :int = 42

    def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> str:
        """Accept one dataclass type or an iterable of them and register their fields."""
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            lowercase_ = ArgumentDefaultsHelpFormatter
        super().__init__(**_UpperCAmelCase )
        if dataclasses.is_dataclass(_UpperCAmelCase ):
            lowercase_ = [dataclass_types]
        lowercase_ = list(_UpperCAmelCase )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(_UpperCAmelCase )

    @staticmethod
    def _lowercase ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] ) -> str:
        """Translate one dataclass field into an ``add_argument`` call on the parser."""
        lowercase_ = f'''--{field.name}'''
        lowercase_ = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , _UpperCAmelCase ):
            raise RuntimeError(
                '''Unresolved type detected, which should have been done with the help of '''
                '''`typing.get_type_hints` method by default''' )
        lowercase_ = kwargs.pop('''aliases''' , [] )
        if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
            lowercase_ = [aliases]
        lowercase_ = getattr(field.type , '''__origin__''' , field.type )
        if origin_type is Union or (hasattr(_UpperCAmelCase , '''UnionType''' ) and isinstance(_UpperCAmelCase , types.UnionType )):
            # Only Optional[X] (or X | None) unions are supported.
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(_UpperCAmelCase ) not in field.type.__args__
            ):
                raise ValueError(
                    '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
                    ''' the argument parser only supports one type per argument.'''
                    f''' Problem encountered in field \'{field.name}\'.''' )
            if type(_UpperCAmelCase ) not in field.type.__args__:
                # filter `str` in Union
                lowercase_ = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                lowercase_ = getattr(field.type , '''__origin__''' , field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                lowercase_ = (
                    field.type.__args__[0] if isinstance(_UpperCAmelCase , field.type.__args__[1] ) else field.type.__args__[1]
                )
                lowercase_ = getattr(field.type , '''__origin__''' , field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        lowercase_ = {}
        if origin_type is Literal or (isinstance(field.type , _UpperCAmelCase ) and issubclass(field.type , _UpperCAmelCase )):
            # Literal values / Enum members become argparse `choices`.
            if origin_type is Literal:
                lowercase_ = field.type.__args__
            else:
                lowercase_ = [x.value for x in field.type]
            lowercase_ = make_choice_type_function(kwargs['''choices'''] )
            if field.default is not dataclasses.MISSING:
                lowercase_ = field.default
            else:
                lowercase_ = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            lowercase_ = copy(_UpperCAmelCase )
            # Hack because type=bool in argparse does not behave as we want.
            lowercase_ = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                lowercase_ = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                lowercase_ = default
                # This tells argparse we accept 0 or 1 value after --field_name
                lowercase_ = '''?'''
                # This is the value that will get picked if we do --field_name (without value)
                lowercase_ = True
        elif isclass(_UpperCAmelCase ) and issubclass(_UpperCAmelCase , _UpperCAmelCase ):
            # List-typed fields accept one or more values.
            lowercase_ = field.type.__args__[0]
            lowercase_ = '''+'''
            if field.default_factory is not dataclasses.MISSING:
                lowercase_ = field.default_factory()
            elif field.default is dataclasses.MISSING:
                lowercase_ = True
        else:
            lowercase_ = field.type
            if field.default is not dataclasses.MISSING:
                lowercase_ = field.default
            elif field.default_factory is not dataclasses.MISSING:
                lowercase_ = field.default_factory()
            else:
                lowercase_ = True
        parser.add_argument(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            lowercase_ = False
            parser.add_argument(f'''--no_{field.name}''' , action='''store_false''' , dest=field.name , **_UpperCAmelCase )

    def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> str:
        """Register all init fields of one dataclass type, resolving its type hints."""
        if hasattr(_UpperCAmelCase , '''_argument_group_name''' ):
            lowercase_ = self.add_argument_group(dtype._argument_group_name )
        else:
            lowercase_ = self
        try:
            lowercase_ = get_type_hints(_UpperCAmelCase )
        except NameError:
            raise RuntimeError(
                f'''Type resolution failed for {dtype}. Try declaring the class in global scope or '''
                '''removing line of `from __future__ import annotations` which opts in Postponed '''
                '''Evaluation of Annotations (PEP 563)''' )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 1_0) and "unsupported operand type(s) for |" in str(_UpperCAmelCase ):
                lowercase_ = '''.'''.join(map(_UpperCAmelCase , sys.version_info[:3] ) )
                raise RuntimeError(
                    f'''Type resolution failed for {dtype} on Python {python_version}. Try removing '''
                    '''line of `from __future__ import annotations` which opts in union types as '''
                    '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
                    '''support Python versions that lower than 3.10, you need to use '''
                    '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
                    '''`X | None`.''' ) from ex
            raise
        for field in dataclasses.fields(_UpperCAmelCase ):
            if not field.init:
                continue
            lowercase_ = type_hints[field.name]
            self._parse_dataclass_field(_UpperCAmelCase , _UpperCAmelCase )

    def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : List[str]=False , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : List[str]=None , ) -> Optional[Any]:
        """Parse argv (plus optional ``.args`` files) into one instance per dataclass type."""
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            lowercase_ = []
            if args_filename:
                args_files.append(Path(_UpperCAmelCase ) )
            elif look_for_args_file and len(sys.argv ):
                args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                lowercase_ = ArgumentParser()
                args_file_parser.add_argument(_UpperCAmelCase , type=_UpperCAmelCase , action='''append''' )
                # Use only remaining args for further parsing (remove the args_file_flag)
                lowercase_ = args_file_parser.parse_known_args(args=_UpperCAmelCase )
                lowercase_ = vars(_UpperCAmelCase ).get(args_file_flag.lstrip('''-''' ) , _UpperCAmelCase )
                if cmd_args_file_paths:
                    args_files.extend([Path(_UpperCAmelCase ) for p in cmd_args_file_paths] )
            lowercase_ = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            lowercase_ = file_args + args if args is not None else file_args + sys.argv[1:]
        lowercase_ = self.parse_known_args(args=_UpperCAmelCase )
        lowercase_ = []
        for dtype in self.dataclass_types:
            # Pull out this dataclass's keys, instantiate it, and drop them from the namespace.
            lowercase_ = {f.name for f in dataclasses.fields(_UpperCAmelCase ) if f.init}
            lowercase_ = {k: v for k, v in vars(_UpperCAmelCase ).items() if k in keys}
            for k in keys:
                delattr(_UpperCAmelCase , _UpperCAmelCase )
            lowercase_ = dtype(**_UpperCAmelCase )
            outputs.append(_UpperCAmelCase )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(_UpperCAmelCase )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' )
            return (*outputs,)

    def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple = False ) -> List[Any]:
        """Instantiate the dataclass types from a plain dict; optionally reject unused keys."""
        lowercase_ = set(args.keys() )
        lowercase_ = []
        for dtype in self.dataclass_types:
            lowercase_ = {f.name for f in dataclasses.fields(_UpperCAmelCase ) if f.init}
            lowercase_ = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            lowercase_ = dtype(**_UpperCAmelCase )
            outputs.append(_UpperCAmelCase )
        if not allow_extra_keys and unused_keys:
            raise ValueError(f'''Some keys are not used by the HfArgumentParser: {sorted(_UpperCAmelCase )}''' )
        return tuple(_UpperCAmelCase )

    def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int = False ) -> Optional[Any]:
        """Load a JSON file and delegate to ``parse_dict``."""
        with open(Path(_UpperCAmelCase ) , encoding='''utf-8''' ) as open_json_file:
            lowercase_ = json.loads(open_json_file.read() )
        lowercase_ = self.parse_dict(_UpperCAmelCase , allow_extra_keys=_UpperCAmelCase )
        return tuple(_UpperCAmelCase )

    def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] = False ) -> List[Any]:
        """Load a YAML file and delegate to ``parse_dict``."""
        lowercase_ = self.parse_dict(yaml.safe_load(Path(_UpperCAmelCase ).read_text() ) , allow_extra_keys=_UpperCAmelCase )
        return tuple(_UpperCAmelCase )
| 30 |
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class __lowercase :
    """Builds a DecisionTransformerConfig and matching random inputs for unit tests.

    NOTE(review): identifiers in this chunk look machine-obfuscated.  In
    ``__init__`` the parameter name ``_UpperCAmelCase`` is repeated (a
    SyntaxError as written) and every assignment binds the throwaway local
    ``__a`` instead of an instance attribute, yet the other methods read
    ``self.batch_size`` etc. — presumably each line was once
    ``self.<name> = <name>``.  Confirm against the upstream transformers
    test before relying on this class.
    """
    def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=6 , _UpperCAmelCase=17 , _UpperCAmelCase=23 , _UpperCAmelCase=11 , _UpperCAmelCase=True , ):
        __a : str = parent
        __a : Any = batch_size
        __a : Any = seq_length
        __a : Optional[int] = act_dim
        __a : Any = state_dim
        __a : str = hidden_size
        __a : List[str] = max_length
        __a : Dict = is_training
    # Draw one random batch: states, actions, rewards, returns-to-go, timesteps, mask.
    def _lowerCamelCase ( self ):
        __a : Dict = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
        __a : Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
        __a : int = floats_tensor((self.batch_size, self.seq_length, 1) )
        __a : List[str] = floats_tensor((self.batch_size, self.seq_length, 1) )
        __a : Optional[Any] = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
        __a : str = random_attention_mask((self.batch_size, self.seq_length) )
        __a : Dict = self.get_config()
        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )
    # Build a small config mirroring the tester's dimensions.
    def _lowerCamelCase ( self ):
        return DecisionTransformerConfig(
            batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    # Run the model and check that every prediction head matches its input shape.
    def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
        __a : Union[str, Any] = DecisionTransformerModel(config=_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        __a : int = model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
        self.parent.assertEqual(result.state_preds.shape , states.shape )
        self.parent.assertEqual(result.action_preds.shape , actions.shape )
        self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modalities: states, returns and actions
    # Repackage the prepared inputs as the kwargs dict the common tests expect.
    def _lowerCamelCase ( self ):
        __a : Optional[Any] = self.prepare_config_and_inputs()
        (
            (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) ,
        ) : List[str] = config_and_inputs
        __a : Dict = {
            '''states''': states,
            '''actions''': actions,
            '''rewards''': rewards,
            '''returns_to_go''': returns_to_go,
            '''timesteps''': timesteps,
            '''attention_mask''': attention_mask,
        }
        return config, inputs_dict
@require_torch
class __lowercase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
    """Common-test harness for DecisionTransformerModel.

    NOTE(review): obfuscated — the three identical ``_UpperCamelCase`` bases
    are presumably ModelTesterMixin, GenerationTesterMixin and
    PipelineTesterMixin; the repeated ``__lowerCAmelCase`` class attributes
    originally carried distinct flag names (test_pruning, test_resize_embeddings,
    …); and the setup method binds the local ``__a`` instead of
    ``self.model_tester``/``self.config_tester`` that later methods read.
    Confirm against the upstream transformers test.
    """
    __lowerCAmelCase = (DecisionTransformerModel,) if is_torch_available() else ()
    __lowerCAmelCase = ()
    __lowerCAmelCase = {'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    __lowerCAmelCase = False
    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    # setUp: build the model tester and the config tester.
    def _lowerCamelCase ( self ):
        __a : str = DecisionTransformerModelTester(self )
        __a : Any = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
    # Delegate the shared config sanity checks.
    def _lowerCamelCase ( self ):
        self.config_tester.run_common_tests()
    # Shape checks via the model tester.
    def _lowerCamelCase ( self ):
        __a : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCAmelCase )
    # Smoke-test loading the first published checkpoint.
    @slow
    def _lowerCamelCase ( self ):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __a : Dict = DecisionTransformerModel.from_pretrained(_UpperCAmelCase )
            self.assertIsNotNone(_UpperCAmelCase )
    # forward() must take the documented arguments, in order.
    def _lowerCamelCase ( self ):
        __a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __a : Optional[int] = model_class(_UpperCAmelCase )
            __a : Any = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __a : Optional[int] = [*signature.parameters.keys()]
            __a : Union[str, Any] = [
                '''states''',
                '''actions''',
                '''rewards''',
                '''returns_to_go''',
                '''timesteps''',
                '''attention_mask''',
            ]
            self.assertListEqual(arg_names[: len(_UpperCAmelCase )] , _UpperCAmelCase )
@require_torch
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self ):
__a : Dict = 2 # number of steps of autoregressive prediction we will perform
__a : List[str] = 10 # defined by the RL environment, may be normalized
__a : Union[str, Any] = DecisionTransformerModel.from_pretrained('''edbeeching/decision-transformer-gym-hopper-expert''' )
__a : str = model.to(_UpperCAmelCase )
__a : str = model.config
torch.manual_seed(0 )
__a : List[str] = torch.randn(1 , 1 , config.state_dim ).to(device=_UpperCAmelCase , dtype=torch.floataa ) # env.reset()
__a : List[str] = torch.tensor(
[[0.2_4_2_7_9_3, -0.2_8_6_9_3_0_7_4, 0.8_7_4_2_6_1_3], [0.6_7_8_1_5_2_7_4, -0.0_8_1_0_1_0_8_5, -0.1_2_9_5_2_1_4_7]] , device=_UpperCAmelCase )
__a : str = torch.tensor(_UpperCAmelCase , device=_UpperCAmelCase , dtype=torch.floataa ).reshape(1 , 1 , 1 )
__a : str = state
__a : List[Any] = torch.zeros(1 , 0 , config.act_dim , device=_UpperCAmelCase , dtype=torch.floataa )
__a : List[Any] = torch.zeros(1 , 0 , device=_UpperCAmelCase , dtype=torch.floataa )
__a : int = torch.tensor(0 , device=_UpperCAmelCase , dtype=torch.long ).reshape(1 , 1 )
for step in range(_UpperCAmelCase ):
__a : Optional[int] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=_UpperCAmelCase )] , dim=1 )
__a : Any = torch.cat([rewards, torch.zeros(1 , 1 , device=_UpperCAmelCase )] , dim=1 )
__a : Any = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
__a , __a , __a : int = model(
states=_UpperCAmelCase , actions=_UpperCAmelCase , rewards=_UpperCAmelCase , returns_to_go=_UpperCAmelCase , timesteps=_UpperCAmelCase , attention_mask=_UpperCAmelCase , return_dict=_UpperCAmelCase , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
__a , __a , __a , __a : Dict = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=_UpperCAmelCase , dtype=torch.floataa ),
1.0,
False,
{},
)
__a : int = action_pred[0, -1]
__a : int = torch.cat([states, state] , dim=1 )
__a : Any = returns_to_go[0, -1] - reward
__a : Dict = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
__a : Optional[Any] = torch.cat(
[timesteps, torch.ones((1, 1) , device=_UpperCAmelCase , dtype=torch.long ) * (step + 1)] , dim=1 ) | 160 | 0 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _lowerCAmelCase ( __snake_case ):
    """Audio-diffusion pipeline: UNet (plus optional VQ-VAE) + scheduler + Mel helper.

    NOTE(review): names are machine-obfuscated; the base class name
    ``__snake_case`` is undefined in this chunk (presumably
    ``DiffusionPipeline``).  Confirm against the diffusers source.
    """
    # Components that may legitimately be None when the pipeline is assembled.
    lowerCAmelCase_ = ["vqvae"]
    def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> int:
        """Register the pipeline's modules with the DiffusionPipeline machinery.

        NOTE(review): the obfuscated signature repeats the same parameter name
        four times (a SyntaxError as written); upstream the parameters are
        (vqvae, unet, mel, scheduler), which register_modules below forwards.
        """
        super().__init__()
        self.register_modules(unet=UpperCAmelCase , scheduler=UpperCAmelCase , mel=UpperCAmelCase , vqvae=UpperCAmelCase )
    def lowercase (self ) -> int:
        """Default number of inference steps: 50 for one scheduler family, 1000 otherwise.

        NOTE(review): the isinstance target ``UpperCAmelCase`` is undefined at
        this scope (obfuscation); upstream it is ``DDIMScheduler`` — confirm.
        """
        return 50 if isinstance(self.scheduler , UpperCAmelCase ) else 1000
    @torch.no_grad()
    def __call__(self , UpperCAmelCase = 1 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = 0 , UpperCAmelCase = 0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = 0 , UpperCAmelCase = 0 , UpperCAmelCase = None , UpperCAmelCase = 0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase=True , ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        """Run the denoising loop and return generated spectrogram images + audio.

        Flow (as visible below): pick step count, seed latent noise, optionally
        encode an input audio slice (through the VQ-VAE when present) and build
        a time mask, run the UNet/scheduler loop, decode through the VQ-VAE,
        convert images to uint8 PIL frames and back to audio via ``self.mel``.

        NOTE(review): the obfuscated signature repeats ``UpperCAmelCase`` for
        every parameter (a SyntaxError as written), all results bind the local
        ``_snake_case`` instead of the names the later statements read
        (``noise``, ``images``, ``mask`` …), and most call sites pass the
        undefined ``UpperCAmelCase``.  Confirm against the upstream diffusers
        AudioDiffusionPipeline before relying on details.
        """
        _snake_case = steps or self.get_default_steps()
        self.scheduler.set_timesteps(UpperCAmelCase )
        _snake_case = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size ) == int:
            _snake_case = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            _snake_case = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ) , generator=UpperCAmelCase , device=self.device , )
        _snake_case = noise
        _snake_case = None
        if audio_file is not None or raw_audio is not None:
            # Convert the supplied audio slice to a normalized [-1, 1] image tensor.
            self.mel.load_audio(UpperCAmelCase , UpperCAmelCase )
            _snake_case = self.mel.audio_slice_to_image(UpperCAmelCase )
            _snake_case = np.frombuffer(input_image.tobytes() , dtype="""uint8""" ).reshape(
                (input_image.height, input_image.width) )
            _snake_case = (input_image / 255) * 2 - 1
            _snake_case = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
            if self.vqvae is not None:
                _snake_case = self.vqvae.encode(torch.unsqueeze(UpperCAmelCase , 0 ) ).latent_dist.sample(
                    generator=UpperCAmelCase )[0]
                _snake_case = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                _snake_case = self.scheduler.add_noise(UpperCAmelCase , UpperCAmelCase , self.scheduler.timesteps[start_step - 1] )
            # Translate the mask window from seconds into spectrogram pixels.
            _snake_case = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            _snake_case = int(mask_start_secs * pixels_per_second )
            _snake_case = int(mask_end_secs * pixels_per_second )
            _snake_case = self.scheduler.add_noise(UpperCAmelCase , UpperCAmelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet , UpperCAmelCase ):
                _snake_case = self.unet(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )["""sample"""]
            else:
                _snake_case = self.unet(UpperCAmelCase , UpperCAmelCase )["""sample"""]
            if isinstance(self.scheduler , UpperCAmelCase ):
                _snake_case = self.scheduler.step(
                    model_output=UpperCAmelCase , timestep=UpperCAmelCase , sample=UpperCAmelCase , eta=UpperCAmelCase , generator=UpperCAmelCase , )["""prev_sample"""]
            else:
                _snake_case = self.scheduler.step(
                    model_output=UpperCAmelCase , timestep=UpperCAmelCase , sample=UpperCAmelCase , generator=UpperCAmelCase , )["""prev_sample"""]
            if mask is not None:
                # Re-impose the (noised) reference audio on the masked borders.
                if mask_start > 0:
                    _snake_case = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    _snake_case = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            _snake_case = 1 / self.vqvae.config.scaling_factor * images
            _snake_case = self.vqvae.decode(UpperCAmelCase )["""sample"""]
        # Map [-1, 1] tensors back to uint8 HxWxC arrays, then PIL images.
        _snake_case = (images / 2 + 0.5).clamp(0 , 1 )
        _snake_case = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        _snake_case = (images * 255).round().astype("""uint8""" )
        _snake_case = list(
            (Image.fromarray(_[:, :, 0] ) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(UpperCAmelCase , mode="""RGB""" ).convert("""L""" ) for _ in images) )
        _snake_case = [self.mel.image_to_audio(UpperCAmelCase ) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(UpperCAmelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(UpperCAmelCase ) )
    @torch.no_grad()
    def lowercase (self , UpperCAmelCase , UpperCAmelCase = 50 ) -> np.ndarray:
        """Run the reverse (deterministic DDIM) process: map images back toward noise.

        NOTE(review): the obfuscated signature repeats the same parameter name
        (a SyntaxError as written) and results bind the local ``_snake_case``
        instead of the names read below (``sample``, ``alpha_prod_t`` …);
        upstream this is the pipeline's ``encode(images, steps)`` method.
        """
        assert isinstance(self.scheduler , UpperCAmelCase )
        self.scheduler.set_timesteps(UpperCAmelCase )
        # PIL images -> uint8 arrays -> normalized [-1, 1] tensor batch.
        _snake_case = np.array(
            [np.frombuffer(image.tobytes() , dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
        _snake_case = (sample / 255) * 2 - 1
        _snake_case = torch.Tensor(UpperCAmelCase ).to(self.device )
        # Iterate timesteps in reverse and invert each DDIM update step.
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
            _snake_case = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            _snake_case = self.scheduler.alphas_cumprod[t]
            _snake_case = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            _snake_case = 1 - alpha_prod_t
            _snake_case = self.unet(UpperCAmelCase , UpperCAmelCase )["""sample"""]
            _snake_case = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            _snake_case = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            _snake_case = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
@staticmethod
def lowercase (UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> torch.Tensor:
_snake_case = acos(torch.dot(torch.flatten(UpperCAmelCase ) , torch.flatten(UpperCAmelCase ) ) / torch.norm(UpperCAmelCase ) / torch.norm(UpperCAmelCase ) )
return sin((1 - alpha) * theta ) * xa / sin(UpperCAmelCase ) + sin(alpha * theta ) * xa / sin(UpperCAmelCase ) | 270 |
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
# Module-level logger (name obfuscated; unused in the visible chunk).
__lowerCAmelCase = logging.getLogger(__name__)
class _lowerCAmelCase ( __snake_case ):
    """Configuration for a "masked" BERT model (movement-pruning example).

    Extends the standard BERT hyper-parameters with pruning controls:
    ``pruning_method`` (e.g. ``"topK"``), ``mask_init`` and ``mask_scale``.

    NOTE(review): reconstructed — the obfuscated original repeated the
    parameter name ``UpperCAmelCase`` sixteen times (a SyntaxError), bound
    every value to the local ``_snake_case``, and had dataset residue fused
    onto its last line.  The parameter names below are exactly the
    right-hand-side names the original body read, paired with the original
    default values in order; the ``self.`` targets follow the evident
    config-class intent — confirm against the upstream MaskedBertConfig.
    """

    lowerCAmelCase_ = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ) -> int:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Pruning-specific knobs (see the movement-pruning example).
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
lowerCAmelCase__ = logging.get_logger(__name__)
# NOTE(review): rebinds the same obfuscated name as the logger above;
# upstream this is the pretrained-config archive map (checkpoint -> config URL).
lowerCAmelCase__ = {
    '''microsoft/xprophetnet-large-wiki100-cased''': (
        '''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'''
    ),
}
class _lowerCamelCase ( lowercase__ ):
    """Configuration for XLM-ProphetNet (encoder-decoder with n-gram prediction).

    NOTE(review): obfuscated — the ``__init__`` signature repeats ``__a`` for
    every parameter (a SyntaxError as written) while the body reads the real
    names (``vocab_size``, ``hidden_size``, …), assignments bind the local
    ``UpperCamelCase`` instead of ``self.<name>``, ``super().__init__``
    forwards the undefined name ``lowercase_``, and the setter decorator
    references an undefined ``num_hidden_layers`` property.  Confirm against
    the upstream XLMProphetNetConfig before relying on this class.
    """
    UpperCAmelCase_ = """xlm-prophetnet"""
    UpperCAmelCase_ = ["""past_key_values"""]
    UpperCAmelCase_ = {
        """num_attention_heads""": """num_encoder_attention_heads""",
    }
    def __init__(self , __a = 0.1 , __a = "gelu" , __a = 3_05_22 , __a = 10_24 , __a = 40_96 , __a = 12 , __a = 16 , __a = 40_96 , __a = 12 , __a = 16 , __a = 0.1 , __a = 0.1 , __a = 5_12 , __a = 0.02 , __a = True , __a = True , __a = 0 , __a = 2 , __a = 32 , __a = 1_28 , __a = False , __a = 0.0 , __a = True , __a = 0 , __a = 1 , __a = 2 , **__a , ) -> Any:
        UpperCamelCase = vocab_size
        UpperCamelCase = hidden_size
        UpperCamelCase = encoder_ffn_dim
        UpperCamelCase = num_encoder_layers
        UpperCamelCase = num_encoder_attention_heads
        UpperCamelCase = decoder_ffn_dim
        UpperCamelCase = num_decoder_layers
        UpperCamelCase = num_decoder_attention_heads
        UpperCamelCase = max_position_embeddings
        UpperCamelCase = init_std # Normal(0, this parameter)
        UpperCamelCase = activation_function
        # parameters for xlmprophetnet
        UpperCamelCase = ngram
        UpperCamelCase = num_buckets
        UpperCamelCase = relative_max_distance
        UpperCamelCase = disable_ngram_loss
        UpperCamelCase = eps
        # 3 Types of Dropout
        UpperCamelCase = attention_dropout
        UpperCamelCase = activation_dropout
        UpperCamelCase = dropout
        UpperCamelCase = use_cache
        super().__init__(
            pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , add_cross_attention=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
    # Total transformer depth = encoder layers + decoder layers.
    @property
    def snake_case_ (self ) -> int:
        return self.num_encoder_layers + self.num_decoder_layers
    # Depth is derived, never set directly.
    @num_hidden_layers.setter
    def snake_case_ (self , __a ) -> List[str]:
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`." )
| 153 |
"""simple docstring"""
def __a ( __lowerCamelCase = 3, __lowerCamelCase = 7, __lowerCamelCase = 100_0000 ):
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : List[Any] = 1
for current_denominator in range(1, limit + 1 ):
UpperCAmelCase_ : Dict = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
UpperCAmelCase_ : List[Any] = current_numerator
UpperCAmelCase_ : Optional[int] = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_000_000))
| 61 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = [[1, 2, 4], [1, 2, 3, 4]]
__lowerCamelCase = DisjunctiveConstraint(_UpperCAmelCase )
self.assertTrue(isinstance(dc.token_ids , _UpperCAmelCase ) )
with self.assertRaises(_UpperCAmelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(_UpperCAmelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(_UpperCAmelCase ):
DisjunctiveConstraint(_UpperCAmelCase ) # fails here
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = [[1, 2, 3], [1, 2, 4]]
__lowerCamelCase = DisjunctiveConstraint(_UpperCAmelCase )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = dc.update(1 )
__lowerCamelCase = stepped is True and completed is False and reset is False
self.assertTrue(_UpperCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = dc.update(2 )
__lowerCamelCase = stepped is True and completed is False and reset is False
self.assertTrue(_UpperCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = dc.update(3 )
__lowerCamelCase = stepped is True and completed is True and reset is False
self.assertTrue(_UpperCAmelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__lowerCamelCase = DisjunctiveConstraint(_UpperCAmelCase )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 366 |
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort ``unsorted`` in place with cocktail shaker sort and return it.

    Alternates a backward bubble pass (smallest element drifts left) with a
    forward bubble pass (largest element drifts right), shrinking the active
    window from the right; stops early when a full sweep makes no swap.

    NOTE(review): reconstructed from an obfuscated original whose swap and
    flag assignments were bound to a throwaway name and whose inner ``range``
    was handed the list instead of the index ``i``; the names below are the
    ones the original body read (``unsorted``, ``swapped``) and the guard's
    ``cocktail_shaker_sort`` call fixes the public name.
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        # Backward pass over positions (i .. 1].
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j - 1], unsorted[j] = unsorted[j], unsorted[j - 1]
                swapped = True
        # Forward pass over positions [0 .. i).
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j + 1], unsorted[j] = unsorted[j], unsorted[j + 1]
                swapped = True
        if not swapped:  # no swap in either direction: already sorted
            break
    return unsorted


# Backward-compatible alias for the original (obfuscated) public name.
lowerCamelCase__ = cocktail_shaker_sort

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 29 | 0 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
# NOTE(review): identifiers below are machine-obfuscated — every assignment
# rebinds ``__snake_case`` while later statements read the real names
# (``vocab``, ``merges``, ``build_dir``, ``src_vocab_file``, ``tokenizer``,
# ``config``, ``tiny_model``, ``batch``, ``outputs``, ``mname_tiny``), so the
# script as written raises NameError.  Kept byte-identical apart from these
# comments; confirm against the upstream fsmt-make-super-tiny-model.py.
__snake_case : Any ='tiny-wmt19-en-ru'
# Build
# borrowed from a test
__snake_case : List[Any] =[
    'l',
    'o',
    'w',
    'e',
    'r',
    's',
    't',
    'i',
    'd',
    'n',
    'w</w>',
    'r</w>',
    't</w>',
    'lo',
    'low',
    'er</w>',
    'low</w>',
    'lowest</w>',
    'newer</w>',
    'wider</w>',
    '<unk>',
]
# Token -> id map derived from the tiny vocab above.
__snake_case : List[Any] =dict(zip(vocab, range(len(vocab))))
__snake_case : List[str] =['l o 123', 'lo w 1456', 'e r</w> 1789', '']
# Materialize vocab/merges files in a temp dir so the tokenizer can load them.
with tempfile.TemporaryDirectory() as tmpdirname:
    __snake_case : Union[str, Any] =Path(tmpdirname)
    __snake_case : Dict =build_dir / VOCAB_FILES_NAMES['src_vocab_file']
    __snake_case : Tuple =build_dir / VOCAB_FILES_NAMES['tgt_vocab_file']
    __snake_case : Optional[int] =build_dir / VOCAB_FILES_NAMES['merges_file']
    with open(src_vocab_file, 'w') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, 'w') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, 'w') as fp:
        fp.write('\n'.join(merges))
    __snake_case : List[str] =FSMTTokenizer(
        langs=['en', 'ru'],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )
# Minimal model dimensions: 1 layer, width 4, 1 head per side.
__snake_case : str =FSMTConfig(
    langs=['ru', 'en'],
    src_vocab_size=1_0_0_0,
    tgt_vocab_size=1_0_0_0,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)
__snake_case : int =FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
__snake_case : Any =tokenizer(['Making tiny model'], return_tensors='pt')
__snake_case : Optional[Any] =tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 129 |
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose rows of raw data into per-criterion columns of floats.

    Example: [[3.5, 2], [1, 4]] -> [[3.5, 1.0], [2.0, 4.0]]

    NOTE(review): reconstructed from an obfuscated original; the loop names
    visible in its body (``source_data``, ``data_lists``) and the call
    ``get_data(...)`` further down the file fix the intended identifiers.
    """
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            # Grow the column list lazily the first time index i is seen.
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


# Backward-compatible alias for the original (obfuscated) public name.
lowerCAmelCase__ = get_data
def calculate_each_score(
    data_lists: list[list[float]], weights: list[int]
) -> list[list[float]]:
    """Min-max normalise every criterion column according to its weight.

    weight == 1: benefit criterion, score = (x - min) / (max - min).
    weight == 0: cost criterion,    score = 1 - (x - min) / (max - min).
    A constant column (max == min) scores 0 (weight 1) or 1 (weight 0).

    Raises:
        ValueError: for any weight other than 0 or 1.

    NOTE(review): reconstructed from an obfuscated original whose parameter
    name was duplicated (a SyntaxError); the body's reads (``mind``, ``maxd``,
    ``score``, ``score_lists``) fix the intended identifiers.
    """
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"""Invalid weight of {weight:f} provided"""
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


# Backward-compatible alias for the original (obfuscated) public name.
lowerCAmelCase__ = calculate_each_score
def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-criterion score columns element-wise: one total per candidate.

    Raises:
        IndexError: if ``score_lists`` is empty (original behaviour kept —
        the length is taken from ``score_lists[0]``).

    NOTE(review): reconstructed from an obfuscated original; the body's reads
    (``score_lists``, ``final_scores``, ``slist``) fix the intended names.
    """
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


# Backward-compatible alias for the original (obfuscated) public name.
lowerCAmelCase__ = generate_final_scores
def lowerCAmelCase__ ( lowerCamelCase_ : list[list[float]] ,lowerCamelCase_ : list[int]):
    """Score candidates end-to-end: transpose the data, normalise each column
    per its weight, sum into final scores, and append each score to its row.

    NOTE(review): obfuscated — the parameter name is duplicated (a SyntaxError
    as written), the helpers ``get_data``/``calculate_each_score``/
    ``generate_final_scores`` are called by real names while the defs above
    carry obfuscated ones, and the loop reads ``source_data`` which is never
    bound here.  Confirm against the upstream module.
    """
    lowerCAmelCase__ : Optional[int] = get_data(lowerCamelCase_)
    lowerCAmelCase__ : Dict = calculate_each_score(lowerCamelCase_ ,lowerCamelCase_)
    lowerCAmelCase__ : Union[str, Any] = generate_final_scores(lowerCamelCase_)
    # append scores to source data
    for i, ele in enumerate(lowerCamelCase_):
        source_data[i].append(lowerCamelCase_)
    return source_data
| 129 | 1 |
from __future__ import annotations
import numpy as np
def relu(vector):
    """Apply the rectified linear unit element-wise: ``max(0, x)``.

    Accepts any array-like; returns a numpy array (``np.maximum`` broadcasts).

    NOTE(review): the obfuscated original was named ``UpperCamelCase`` while
    the ``__main__`` guard below calls ``relu``; both names are kept bound.
    """
    return np.maximum(0, vector)


# Backward-compatible alias for the original (obfuscated) public name.
UpperCamelCase = relu

if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
| 350 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Shannon entropy of ``softmax(x)`` row-wise (``dim=1``).

    Uses the identity  H = log(sum_i exp(x_i)) - sum_i(x_i * exp(x_i)) / sum_i exp(x_i),
    which the original computed directly.  Rows are shifted by their max
    first: the formula is shift-invariant, so the result is unchanged, but
    ``exp`` can no longer overflow for large logits.

    NOTE(review): reconstructed — the obfuscated original bound its results
    to the local ``lowercase`` while reading ``exp_x``/``A``/``B`` (NameError
    as written), and the classes below call it as ``entropy``.
    """
    shifted = x - x.max(dim=1, keepdim=True).values
    exp_x = torch.exp(shifted)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i - m)
    B = torch.sum(shifted * exp_x, dim=1)  # sum of (x_i - m) * exp(x_i - m)
    return torch.log(A) - B / A


# Backward-compatible alias for the original (obfuscated) public name.
UpperCamelCase = entropy
class lowercase ( nn.Module ):
    """DeeBERT encoder: a stack of BertLayers, each followed by a "highway"
    (early-exit) classifier head.

    NOTE(review): identifiers are machine-obfuscated — all three methods are
    named ``A__`` (later defs shadow earlier ones), results bind the local
    ``lowercase`` instead of the attributes/locals the code reads
    (``self.layer``, ``hidden_states`` …), and the forward signature repeats
    the parameter ``A__`` (a SyntaxError as written).  Upstream this is
    ``DeeBertEncoder`` from the transformers DeeBERT example; confirm there.
    """
    def __init__( self ,A__):
        super().__init__()
        lowercase = config.output_attentions
        lowercase = config.output_hidden_states
        lowercase = nn.ModuleList([BertLayer(A__) for _ in range(config.num_hidden_layers)])
        lowercase = nn.ModuleList([BertHighway(A__) for _ in range(config.num_hidden_layers)])
        lowercase = [-1 for _ in range(config.num_hidden_layers)]
    # Presumably set_early_exit_entropy: broadcast a scalar threshold, or store a per-layer list.
    def A__ ( self ,A__):
        if (type(A__) is float) or (type(A__) is int):
            for i in range(len(self.early_exit_entropy)):
                lowercase = x
        else:
            lowercase = x
    # Presumably init_highway_pooler: copy the main pooler's weights into every highway pooler.
    def A__ ( self ,A__):
        lowercase = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
    # Presumably forward: run each layer, collect highway exits, and raise
    # HighwayException to abort as soon as an exit's entropy is low enough.
    def A__ ( self ,A__ ,A__=None ,A__=None ,A__=None ,A__=None ,):
        lowercase = ()
        lowercase = ()
        lowercase = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                lowercase = all_hidden_states + (hidden_states,)
            lowercase = layer_module(
                A__ ,A__ ,head_mask[i] ,A__ ,A__)
            lowercase = layer_outputs[0]
            if self.output_attentions:
                lowercase = all_attentions + (layer_outputs[1],)
            lowercase = (hidden_states,)
            if self.output_hidden_states:
                lowercase = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                lowercase = current_outputs + (all_attentions,)
            lowercase = self.highway[i](A__)
            # logits, pooled_output
            if not self.training:
                # At inference time, exit early when the highway head is confident enough.
                lowercase = highway_exit[0]
                lowercase = entropy(A__)
                lowercase = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
                lowercase = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    lowercase = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(A__ ,i + 1)
            else:
                lowercase = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            lowercase = all_hidden_states + (hidden_states,)
        lowercase = (hidden_states,)
        if self.output_hidden_states:
            lowercase = outputs + (all_hidden_states,)
        if self.output_attentions:
            lowercase = outputs + (all_attentions,)
        lowercase = outputs + (all_highway_exits,)
        return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    '''The Bert Model transformer with early exiting (DeeBERT). ''' , SCREAMING_SNAKE_CASE__ , )
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """DeeBERT backbone: BertEmbeddings + DeeBertEncoder + BertPooler.

    NOTE(review): obfuscated — the base name ``SCREAMING_SNAKE_CASE__`` is
    undefined here (upstream: BertPreTrainedModel), most methods share the
    name ``A__`` (later defs shadow earlier), the forward signature repeats
    its parameter name (a SyntaxError as written), and results bind the local
    ``lowercase`` instead of the names read below.  Confirm against the
    upstream DeeBERT example before relying on details.
    """
    def __init__( self ,A__):
        super().__init__(A__)
        lowercase = config
        lowercase = BertEmbeddings(A__)
        lowercase = DeeBertEncoder(A__)
        lowercase = BertPooler(A__)
        self.init_weights()
    # Presumably init_highway_pooler: share the main pooler weights with the highway heads.
    def A__ ( self):
        self.encoder.init_highway_pooler(self.pooler)
    # Presumably get_input_embeddings.
    def A__ ( self):
        return self.embeddings.word_embeddings
    # Presumably set_input_embeddings.
    def A__ ( self ,A__):
        lowercase = value
    # Presumably _prune_heads: {layer_num: [heads]} -> prune in each attention block.
    def A__ ( self ,A__):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(A__)
    # Presumably forward.
    @add_start_docstrings_to_model_forward(A__)
    def A__ ( self ,A__=None ,A__=None ,A__=None ,A__=None ,A__=None ,A__=None ,A__=None ,A__=None ,):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''')
        elif input_ids is not None:
            lowercase = input_ids.size()
        elif inputs_embeds is not None:
            lowercase = inputs_embeds.size()[:-1]
        else:
            raise ValueError('''You have to specify either input_ids or inputs_embeds''')
        lowercase = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            lowercase = torch.ones(A__ ,device=A__)
        if encoder_attention_mask is None:
            lowercase = torch.ones(A__ ,device=A__)
        if token_type_ids is None:
            lowercase = torch.zeros(A__ ,dtype=torch.long ,device=A__)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        lowercase = self.get_extended_attention_mask(A__ ,A__ ,A__)
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            lowercase = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            lowercase = encoder_attention_mask[:, None, None, :]
        lowercase = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype) # fp16 compatibility
        lowercase = (1.0 - encoder_extended_attention_mask) * -10000.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        lowercase = self.get_head_mask(A__ ,self.config.num_hidden_layers)
        lowercase = self.embeddings(
            input_ids=A__ ,position_ids=A__ ,token_type_ids=A__ ,inputs_embeds=A__)
        lowercase = self.encoder(
            A__ ,attention_mask=A__ ,head_mask=A__ ,encoder_hidden_states=A__ ,encoder_attention_mask=A__ ,)
        lowercase = encoder_outputs[0]
        lowercase = self.pooler(A__)
        lowercase = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ] # add hidden_states and attentions if they are here
        return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Control-flow carrier raised by the encoder to abort at an early exit,
    holding the partial outputs and the 1-based layer that exited.

    NOTE(review): obfuscated — the base ``SCREAMING_SNAKE_CASE__`` is
    undefined here (upstream: ``Exception``, the class is HighwayException),
    the ``__init__`` parameter name is duplicated (a SyntaxError as written),
    and the assignments bind the local ``lowercase`` instead of
    ``self.message``/``self.exit_layer``.
    """
    def __init__( self ,A__ ,A__):
        lowercase = message
        lowercase = exit_layer # start from 1!
class lowercase ( nn.Module ):
    """A single "highway" early-exit head: pool, dropout, linear classify.

    NOTE(review): obfuscated — ``__init__`` binds the local ``lowercase``
    instead of the ``self.pooler``/``self.dropout``/``self.classifier`` that
    the forward pass reads; upstream this is ``BertHighway`` from the DeeBERT
    example.  Confirm there before relying on details.
    """
    def __init__( self ,A__):
        super().__init__()
        lowercase = BertPooler(A__)
        lowercase = nn.Dropout(config.hidden_dropout_prob)
        lowercase = nn.Linear(config.hidden_size ,config.num_labels)
    # Presumably forward(encoder_outputs) -> (logits, pooled_output).
    def A__ ( self ,A__):
        # Pooler
        lowercase = encoder_outputs[0]
        lowercase = self.pooler(A__)
        # "return" pooler_output
        # BertModel
        lowercase = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        lowercase = bmodel_output[1]
        lowercase = self.dropout(A__)
        lowercase = self.classifier(A__)
        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. """,
    SCREAMING_SNAKE_CASE__,
)
class lowercase(SCREAMING_SNAKE_CASE__):
    """DeeBERT sequence classifier with highway (early-exit) heads.

    NOTE(review): variable names restored from the upstream DeeBERT research
    example; the base class and the `BERT_INPUTS_DOCSTRING` constant are
    defined elsewhere in this file -- confirm before merging.
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        """Run BERT with early exits.

        Returns (loss?), logits, (hidden_states), (attentions),
        (entropies + exit_layer in eval mode) -- see trailing comment.
        """
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # An early-exit layer raised: its message carries the outputs tuple.
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_logits_all = []
            highway_entropy = []

        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 97 | 0 |
'''simple docstring'''
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Return the maximum of ``nums[left:right + 1]`` by divide and conquer.

    Negative indices in ``[-len(nums), -1]`` are accepted, as with normal
    Python indexing.

    Raises:
        ValueError: if ``nums`` is an empty sequence.
        IndexError: if ``left`` or ``right`` is outside ``[-len(nums), len(nums))``.
    """
    if len(nums) == 0:
        raise ValueError("""find_max() arg is an empty sequence""")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("""list index out of range""")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| 70 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

# File names the fast-tokenizer machinery expects on disk.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Checkpoint name -> hosted vocab file.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

# Maximum sequence length per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

# Per-checkpoint tokenizer init overrides.
PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}
class UpperCAmelCase(snake_case_):
    """Fast (tokenizers-backed) RoFormer tokenizer.

    NOTE(review): attribute and method names restored to the canonical
    `PreTrainedTokenizerFast` API; the base class alias `snake_case_` is
    defined elsewhere in this file.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its serialized options disagree
        # with the requested lowercasing / accent-stripping behaviour.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("""lowercase""", do_lower_case) != do_lower_case
            or normalizer_state.get("""strip_accents""", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("""type"""))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        # The custom Jieba pre-tokenizer cannot be pickled; swap in a plain
        # BertPreTokenizer for serialization.
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        # Restore the Jieba pre-tokenizer dropped by __getstate__.
        self.__dict__ = d
        vocab = self.__dict__["""_tokenizer"""].get_vocab()
        self.__dict__["""_tokenizer"""].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] (+ B [SEP] when a second segment is given)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: zeros over [CLS] A [SEP], ones over B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the backend model's vocab files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        # Serialize with a picklable pre-tokenizer (the Jieba one is custom).
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
| 70 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
# Lazy-import table: submodule name -> public symbols it provides.
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    # Import path must match the _import_structure key above.
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 350 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowerCAmelCase_(lowerCamelCase_):
    """Configuration for the CvT model (three-stage convolutional ViT).

    All list-valued arguments hold one entry per stage. The list defaults
    mirror the upstream `CvtConfig` and are never mutated.
    """

    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 61 | 0 |
def gnome_sort(lst):
    """Sort ``lst`` in place with gnome sort and return it.

    O(n^2) worst case; walks forward while neighbours are ordered and steps
    back after each swap.
    """
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # Swap the out-of-order neighbours and step back to re-check.
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
| 26 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")  # element type stored in the segment tree
class SegmentTree(Generic[T]):
    """Iterative segment tree over ``arr`` combining values with ``fnc``.

    ``fnc`` must be an associative binary operation (min, max, sum, ...).
    Leaves live at indices [N, 2N); internal node p covers its children
    2p and 2p + 1.
    """

    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None

        self.N: int = len(arr)
        # st[0..N-1] are internal nodes (filled by build); st[N..2N-1] = arr.
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        """Fill every internal node from its two children, bottom-up."""
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """Set arr[p] = v and refresh the ancestors of that leaf."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """Combine arr[l..r] (inclusive) with fn; None for an empty range."""
        l, r = l + self.N, r + self.N  # noqa: E741
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2  # noqa: E741
        return res
if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Check every (i, j) range query against a reduce() reference."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    # Apply point updates, then re-verify every range.
    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
| 168 | 0 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class _a ( UpperCamelCase__ ):
    """Tests for RagRetriever over canonical HF, custom HF (in-memory and
    on-disk) and legacy faiss indexes.

    NOTE(review): this block is machine-mangled -- every local is named
    ``lowercase__``/``lowerCAmelCase__``, and assignments that clearly should
    set fixture attributes (``self.tmpdirname``, ``self.retrieval_vector_size``,
    ``self.vocab_file``, ``self.merges_file``) bind throwaway locals instead,
    so these tests cannot run as written. Restore names against the upstream
    transformers test suite before use.
    """

    # setUp: write throwaway DPR (WordPiece) and BART (BPE) tokenizer files
    # into a fresh temp dir; presumably also records tmpdirname and
    # retrieval_vector_size (8) on self -- TODO confirm.
    def lowerCamelCase_ ( self: Dict ) -> str:
        """Create temp tokenizer fixtures for DPR and BART."""
        lowercase__ = tempfile.mkdtemp()
        lowercase__ = 8
        # DPR tok
        lowercase__ = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        lowercase__ = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
        os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
        lowercase__ = os.path.join(lowerCAmelCase__ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        # BART tok
        lowercase__ = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        lowercase__ = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
        lowercase__ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        lowercase__ = {"unk_token": "<unk>"}
        lowercase__ = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
        os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
        lowercase__ = os.path.join(lowerCAmelCase__ , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
        lowercase__ = os.path.join(lowerCAmelCase__ , BART_VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(lowerCAmelCase__ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(lowerCAmelCase__ ) )

    def lowerCamelCase_ ( self: Tuple ) -> DPRQuestionEncoderTokenizer:
        """Load the DPR question-encoder tokenizer from the temp fixtures."""
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )

    def lowerCamelCase_ ( self: Tuple ) -> DPRContextEncoderTokenizer:
        """Load the DPR context-encoder tokenizer from the temp fixtures."""
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )

    def lowerCamelCase_ ( self: str ) -> BartTokenizer:
        """Load the BART tokenizer from the temp fixtures."""
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )

    def lowerCamelCase_ ( self: int ) -> Tuple:
        """tearDown: delete the temp fixture directory."""
        shutil.rmtree(self.tmpdirname )

    def lowerCamelCase_ ( self: str ) -> Optional[Any]:
        """Build a 2-passage dataset with a Flat inner-product faiss index."""
        lowercase__ = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''text''': ['''foo''', '''bar'''],
                '''title''': ['''Foo''', '''Bar'''],
                '''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
            } )
        dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
        return dataset

    def lowerCamelCase_ ( self: List[Any] ) -> Any:
        """RagRetriever over the canonical HF index (load_dataset patched)."""
        lowercase__ = self.get_dummy_dataset()
        lowercase__ = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
        with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
            lowercase__ = dataset
            lowercase__ = RagRetriever(
                lowerCAmelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        return retriever

    def lowerCamelCase_ ( self: Any , UpperCamelCase_: bool ) -> Union[str, Any]:
        """RagRetriever over a custom HF index, in memory or saved to disk."""
        lowercase__ = self.get_dummy_dataset()
        lowercase__ = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
        if from_disk:
            # Persist dataset + faiss index and let the retriever reload them.
            lowercase__ = os.path.join(self.tmpdirname , '''dataset''' )
            lowercase__ = os.path.join(self.tmpdirname , '''index.faiss''' )
            dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
            dataset.drop_index('''embeddings''' )
            dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
            del dataset
            lowercase__ = RagRetriever(
                lowerCAmelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        else:
            lowercase__ = RagRetriever(
                lowerCAmelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , lowerCAmelCase__ ) , )
        return retriever

    def lowerCamelCase_ ( self: str ) -> Any:
        """RagRetriever over a legacy-format (hnswSQ8 + pickle) index."""
        lowercase__ = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''text''': ['''foo''', '''bar'''],
                '''title''': ['''Foo''', '''Bar'''],
                '''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
            } )
        dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
        lowercase__ = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
        dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
        pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
        lowercase__ = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
        lowercase__ = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(lowerCAmelCase__ , open(lowerCAmelCase__ , '''wb''' ) )
        lowercase__ = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
        lowercase__ = RagRetriever(
            lowerCAmelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
        return retriever

    def lowerCamelCase_ ( self: Tuple ) -> Union[str, Any]:
        """retrieve() on the canonical HF index returns docs ranked by inner product."""
        lowercase__ = 1
        lowercase__ = self.get_dummy_canonical_hf_index_retriever()
        lowercase__ = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        lowercase__ = retriever.retrieve(lowerCAmelCase__ , n_docs=lowerCAmelCase__ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(lowerCAmelCase__ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowerCAmelCase__ )
        self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def lowerCamelCase_ ( self: Optional[int] ) -> Any:
        """Canonical retriever survives save_pretrained / from_pretrained."""
        lowercase__ = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
                lowercase__ = self.get_dummy_dataset()
                retriever.save_pretrained(lowerCAmelCase__ )
                lowercase__ = RagRetriever.from_pretrained(lowerCAmelCase__ )
                self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
                lowercase__ = np.array(
                    [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
                lowercase__ = retriever.retrieve(lowerCAmelCase__ , n_docs=1 )
                self.assertTrue(out is not None )

    def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]:
        """retrieve() on the in-memory custom HF index."""
        lowercase__ = 1
        lowercase__ = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase__ )
        lowercase__ = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        lowercase__ = retriever.retrieve(lowerCAmelCase__ , n_docs=lowerCAmelCase__ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(lowerCAmelCase__ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowerCAmelCase__ )
        self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]:
        """In-memory custom retriever survives save / reload round-trip."""
        lowercase__ = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase__ )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(lowerCAmelCase__ )
            lowercase__ = RagRetriever.from_pretrained(lowerCAmelCase__ )
            self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
            lowercase__ = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            lowercase__ = retriever.retrieve(lowerCAmelCase__ , n_docs=1 )
            self.assertTrue(out is not None )

    def lowerCamelCase_ ( self: Dict ) -> List[str]:
        """retrieve() on the on-disk custom HF index."""
        lowercase__ = 1
        lowercase__ = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase__ )
        lowercase__ = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        lowercase__ = retriever.retrieve(lowerCAmelCase__ , n_docs=lowerCAmelCase__ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(lowerCAmelCase__ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowerCAmelCase__ )
        self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def lowerCamelCase_ ( self: Tuple ) -> Any:
        """On-disk custom retriever survives save / reload round-trip."""
        lowercase__ = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase__ )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(lowerCAmelCase__ )
            lowercase__ = RagRetriever.from_pretrained(lowerCAmelCase__ )
            self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
            lowercase__ = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            lowercase__ = retriever.retrieve(lowerCAmelCase__ , n_docs=1 )
            self.assertTrue(out is not None )

    def lowerCamelCase_ ( self: Any ) -> Any:
        """retrieve() on the legacy index exposes only text/title fields."""
        lowercase__ = 1
        lowercase__ = self.get_dummy_legacy_index_retriever()
        lowercase__ = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        lowercase__ = retriever.retrieve(lowerCAmelCase__ , n_docs=lowerCAmelCase__ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(lowerCAmelCase__ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''text'''] ) , lowerCAmelCase__ )
        self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def lowerCamelCase_ ( self: List[str] ) -> List[str]:
        """Legacy retriever survives save / reload round-trip."""
        lowercase__ = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(lowerCAmelCase__ )
            lowercase__ = RagRetriever.from_pretrained(lowerCAmelCase__ )
            self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
            lowercase__ = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            lowercase__ = retriever.retrieve(lowerCAmelCase__ , n_docs=1 )
            self.assertTrue(out is not None )

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def lowerCamelCase_ ( self: Any ) -> str:
        """__call__ returns numpy by default and torch tensors on request."""
        import torch

        lowercase__ = 1
        lowercase__ = self.get_dummy_canonical_hf_index_retriever()
        lowercase__ = [[5, 7], [10, 11]]
        lowercase__ = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        lowercase__ = retriever(lowerCAmelCase__ , lowerCAmelCase__ , prefix=retriever.config.generator.prefix , n_docs=lowerCAmelCase__ )
        lowercase__ = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
        self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
        self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
        lowercase__ = retriever(
            lowerCAmelCase__ , lowerCAmelCase__ , prefix=retriever.config.generator.prefix , n_docs=lowerCAmelCase__ , return_tensors='''pt''' , )
        lowercase__ = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
        self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
        self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
        """With a ctx-encoder tokenizer set, __call__ also returns tokenized docs."""
        lowercase__ = self.get_dpr_ctx_encoder_tokenizer()
        lowercase__ = 1
        lowercase__ = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase__ )
        retriever.set_ctx_encoder_tokenizer(lowerCAmelCase__ )
        lowercase__ = [[5, 7], [10, 11]]
        lowercase__ = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        lowercase__ = retriever(lowerCAmelCase__ , lowerCAmelCase__ , prefix=retriever.config.generator.prefix , n_docs=lowerCAmelCase__ )
        self.assertEqual(
            len(lowerCAmelCase__ ) , 6 )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , lowerCAmelCase__ )  # check for doc token related keys in dictionary.
| 367 |
def naive_pattern_search(s, pattern):
    """Return every index in ``s`` where ``pattern`` occurs (O(len(s) * len(pattern))).

    An empty pattern matches at every position, including len(s).
    """
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search('ABCDEFG', 'DE') == [3]
    print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
| 93 | 0 |
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def a( A : str ) -> int:
"""simple docstring"""
a = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
f'''{test_file} instead.''' )
a = components[-1]
if not test_fn.endswith("py" ):
raise ValueError(f'''`test_file` should be a python file. Got {test_fn} instead.''' )
if not test_fn.startswith("test_modeling_" ):
raise ValueError(
f'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' )
a = components[:-1] + [test_fn.replace(".py" , "" )]
a = ".".join(_A )
return test_module_path
def a( A : Any ) -> Tuple:
"""simple docstring"""
a = get_module_path(_A )
a = importlib.import_module(_A )
return test_module
def a( A : Optional[Any] ) -> List[Any]:
"""simple docstring"""
a = []
a = get_test_module(_A )
for attr in dir(_A ):
if attr.endswith("ModelTester" ):
tester_classes.append(getattr(_A , _A ) )
# sort with class names
return sorted(_A , key=lambda A : x.__name__ )
def a( A : str ) -> List[str]:
"""simple docstring"""
a = []
a = get_test_module(_A )
for attr in dir(_A ):
a = getattr(_A , _A )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
a = getattr(_A , "all_model_classes" , [] )
if len(_A ) > 0:
test_classes.append(_A )
# sort with class names
return sorted(_A , key=lambda A : x.__name__ )
def a( A : Tuple ) -> Optional[Any]:
"""simple docstring"""
a = get_test_classes(_A )
a = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(_A , key=lambda A : x.__name__ )
def a( A : str ) -> str:
"""simple docstring"""
a = test_class()
if hasattr(_A , "setUp" ):
test.setUp()
a = None
if hasattr(_A , "model_tester" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
a = test.model_tester.__class__
return model_tester
def a( A : List[str] , A : List[Any] ) -> str:
"""simple docstring"""
a = get_test_classes(_A )
a = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(_A )
# sort with class names
return sorted(_A , key=lambda A : x.__name__ )
def a( A : List[Any] , A : Optional[int] ) -> Any:
"""simple docstring"""
a = get_test_classes_for_model(_A , _A )
a = []
for test_class in test_classes:
a = get_model_tester_from_test_class(_A )
if tester_class is not None:
tester_classes.append(_A )
# sort with class names
return sorted(_A , key=lambda A : x.__name__ )
def a( A : Any ) -> List[Any]:
"""simple docstring"""
a = get_test_classes(_A )
a = {test_class: get_model_tester_from_test_class(_A ) for test_class in test_classes}
return test_tester_mapping
def a( A : Tuple ) -> Any:
    """Map each model class found in module *A* to the test classes covering it.

    NOTE: relies on ``get_model_classes`` and ``get_test_classes_for_model``.
    """
    model_classes = get_model_classes(A)
    model_test_mapping = {
        model_class: get_test_classes_for_model(A, model_class) for model_class in model_classes
    }
    return model_test_mapping
def a( A : str ) -> Any:
    """Map each model class found in module *A* to its model-tester classes.

    NOTE: relies on ``get_model_classes`` and ``get_tester_classes_for_model``.
    """
    model_classes = get_model_classes(A)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(A, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def a( A : Any ) -> List[str]:
    """Return a JSON-serializable view of *A*: strings pass through, classes
    become their names, lists/tuples/dicts are converted recursively, and
    anything else is returned unchanged.
    """
    if isinstance(A, str):
        return A
    elif isinstance(A, type):
        # a class object serializes as its name
        return A.__name__
    elif isinstance(A, (list, tuple)):
        return [a(x) for x in A]
    elif isinstance(A, dict):
        return {a(k): a(v) for k, v in A.items()}
    else:
        return A
| 227 |
def __UpperCamelCase ( _A : Dict ) -> Optional[List[int]]:
    """Topologically sort the DAG *_A* (adjacency list keyed by 0..n-1) using
    Kahn's algorithm.

    Prints the order (or "Cycle exists") to preserve the original behavior, and
    additionally returns the order as a list — or ``None`` when a cycle exists —
    so callers can use the result programmatically.
    """
    from collections import deque  # local import keeps this function self-contained

    indegree = [0] * len(_A)
    topo = []
    processed = 0
    # count incoming edges per vertex
    for neighbours in _A.values():
        for node in neighbours:
            indegree[node] += 1
    # start from every source vertex; deque gives O(1) pops (list.pop(0) is O(n))
    queue = deque(node for node in range(len(_A)) if indegree[node] == 0)
    while queue:
        vertex = queue.popleft()
        processed += 1
        topo.append(vertex)
        for nxt in _A[vertex]:
            indegree[nxt] -= 1
            if indegree[nxt] == 0:
                queue.append(nxt)
    if processed != len(_A):
        # some vertex never reached indegree 0 -> the graph has a cycle
        print("""Cycle exists""")
        return None
    print(topo)
    return topo
# Adjacency List of Graph
__A : List[Any] = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
# Run the topological sort defined above (the names `topological_sort` and
# `graph` were never defined in this module).
__UpperCamelCase(__A)
| 154 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Multi-GPU integration tests: each test launches one of the helper scripts
    shipped with `accelerate.test_utils` via `torchrun`.

    The generated code gave every method the same name (so only the last one
    survived) and never stored the script paths on `self`; both are fixed here.
    """

    def setUp(self) -> None:
        # Locate the scripts directory next to the `accelerate.test_utils` module.
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["""scripts""", """test_script.py"""] )
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["""scripts""", """test_distributed_data_loop.py"""] )
        self.operation_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["""scripts""", """test_ops.py"""] )

    @require_multi_gpu
    def test_multi_gpu(self) -> None:
        print(f"Found {torch.cuda.device_count()} devices." )
        cmd = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd, env=os.environ.copy() )

    @require_multi_gpu
    def test_multi_gpu_ops(self) -> None:
        print(f"Found {torch.cuda.device_count()} devices." )
        cmd = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}" )
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd, env=os.environ.copy() )

    @require_multi_gpu
    def test_pad_across_processes(self) -> None:
        # Launches THIS file so the `__main__` block below runs on every process.
        cmd = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd, env=os.environ.copy() )

    @require_multi_gpu
    def test_distributed_data_loop(self) -> None:
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only" )
        cmd = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        # restrict the run to two GPUs regardless of how many are visible
        with patch_environment(omp_num_threads=1 , cuda_visible_devices="""0,1""" ):
            execute_subprocess_async(cmd, env=os.environ.copy() )
if __name__ == "__main__":
    # Executed on every process by the `test_pad_across_processes` launcher above.
    accelerator = Accelerator()
    # each rank builds a tensor of a different length so padding is observable
    shape = (accelerator.state.process_index + 2, 1_0)
    tensor = torch.randint(0, 1_0, shape).to(accelerator.device)
    error_msg = ""

    # pad at the end (default)
    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # pad at the beginning
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 351 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    # Without sentencepiece the slow tokenizer class cannot be used.
    MBartTokenizer = None

logger = logging.get_logger(__name__)

# The tokenizer class below reads these module-level constants; the generated
# code bound them all to the throwaway name `A`, leaving every reference broken.
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1_0_2_4,
    "facebook/mbart-large-cc25": 1_0_2_4,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
    """
    A "fast" MBART tokenizer (backed by the HuggingFace *tokenizers* library).

    The tokenization method is ``<tokens> <eos> <language code>`` for source
    sentences and ``<language code> <tokens> <eos>`` for target sentences.

    The generated code gave every class attribute and method the same name (so
    only the last of each survived) and bound instance state to throwaway
    locals; the canonical attribute/method names the base class relies on are
    restored here.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # the slow tokenizer can only be re-created when a sentencepiece model exists
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else """en_XX"""
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        """Current source language code (e.g. ``en_XX``)."""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None) -> List[int]:
        """Wrap the token ids with the current prefix/suffix special tokens."""
        if token_ids_a_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a_a + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_a=None) -> List[int]:
        """mBART does not use token type ids; return a list of zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline: tokenize and set the forced BOS id."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self, src_texts, src_lang="en_XX", tgt_texts=None, tgt_lang="ro_RO", **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset special tokens to the source-language setting: no prefix,
        suffix = [eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str,
            pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset special tokens to the target-language setting: no prefix,
        suffix = [eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str,
            pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Copy the sentencepiece model into *save_directory* (slow-tokenizer format)."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory." )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )
        return (out_vocab_file,)
| 76 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class _UpperCAmelCase ( snake_case_ ):
    """Configuration for BioGPT models: stores the model hyperparameters as
    instance attributes (the generated ``__init__`` bound every value to a
    throwaway local, so the config object carried no state)."""

    # model identifier used by the auto-classes
    model_type = '''biogpt'''

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 79 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Declared lazy import structure; `_LazyModule` below resolves these on demand.
# (The generated code bound this dict to `lowerCamelCase_`, then clobbered that
# name with the modeling list, while the last line read the undefined
# `_import_structure` — all three references are reconciled here.)
_import_structure = {
    '''configuration_time_series_transformer''': [
        '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''TimeSeriesTransformerConfig''',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # modeling code is only importable when torch is installed
    _import_structure['''modeling_time_series_transformer'''] = [
        '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TimeSeriesTransformerForPrediction''',
        '''TimeSeriesTransformerModel''',
        '''TimeSeriesTransformerPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 79 | 1 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__A = logging.get_logger(__name__)
def __A ( box , width , height ) -> Tuple:
    '''Scale an absolute (x0, y0, x1, y1) *box* to the 0-1000 coordinate range
    used by LayoutLM-style models. (The generated signature named all three
    parameters `_lowercase`, which is a SyntaxError.)'''
    return [
        int(10_00 * (box[0] / width) ),
        int(10_00 * (box[1] / height) ),
        int(10_00 * (box[2] / width) ),
        int(10_00 * (box[3] / height) ),
    ]
def __A ( image , lang , tesseract_config = None ) -> Optional[Any]:
    '''Run Tesseract OCR on *image* and return ``(words, normalized_boxes)``,
    where each box is scaled to 0-1000 via ``normalize_box``.

    NOTE: relies on the module-level ``normalize_box`` helper and the optional
    ``pytesseract`` backend.'''
    tesseract_config = tesseract_config if tesseract_config is not None else ''''''

    # apply OCR
    pil_image = to_pil_image(image )
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type='''dict''' , config=tesseract_config )
    words, left, top, width, height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']

    # filter empty words and corresponding coordinates (set gives O(1) membership)
    irrelevant_indices = {idx for idx, word in enumerate(words ) if not word.strip()}
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_boxes.append([x, y, x + w, y + h] )

    # finally, normalize the bounding boxes
    normalized_boxes = [normalize_box(box , image_width , image_height ) for box in actual_boxes]

    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class SCREAMING_SNAKE_CASE ( snake_case ):
    """Image processor for LayoutLM-style models: resizes images, optionally
    runs Tesseract OCR to extract words and normalized boxes, and converts
    channels RGB->BGR (as Detectron2 requires).

    The generated code named every parameter `__A` (a SyntaxError), bound the
    `__init__` values to throwaway locals, and gave both methods the same name;
    all of that is repaired here.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: "PILImageResampling" = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: "PILImageResampling" = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize *image* to ``(size["height"], size["width"])``."""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        output_size = (size['''height'''], size['''width'''])
        # `resize` here resolves to the module-level image_transforms helper
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: "PILImageResampling" = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> "PIL.Image.Image":
        """Prepare a batch of images (and, when `apply_ocr`, OCR words/boxes)."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]

        if apply_ocr:
            requires_backends(self , '''pytesseract''' )
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image , ocr_lang , tesseract_config )
                words_batch.append(words )
                boxes_batch.append(boxes )

        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]

        data = BatchFeature(data={'''pixel_values''': images} , tensor_type=return_tensors )
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 367 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class SCREAMING_SNAKE_CASE ( snake_case ):
    """Zero-shot text classification tool built on an NLI checkpoint.

    The generated code gave every class attribute and method the same name and
    bound working values to throwaway locals; the attribute names the
    `PipelineTool` base class relies on are restored here.
    """

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        """Locate the "entailment" label id in the model config after loading."""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        # `id2label` maps class index -> label name (was the typo `idalabel`)
        for idx, label in config.id2label.items():
            if label.lower().startswith('''entail''' ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )

    def encode(self, text, labels):
        """Build one NLI premise/hypothesis pair per candidate label."""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [f"""This example is {label}""" for label in labels] , return_tensors='''pt''' , padding='''max_length''' , )

    def decode(self, outputs):
        """Return the label whose hypothesis scored highest."""
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
| 75 | 0 |
"""simple docstring"""
def _snake_case ( first : int , second : int ):
    """Add two non-negative integers using only bitwise operations.

    Classic carry-propagation: XOR adds without carry, AND finds the carry,
    which is shifted left and re-added until it vanishes.  (The generated
    signature duplicated the parameter name — a SyntaxError — and the carry
    was computed into an undefined variable and never fed back in.)
    """
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Bind the two inputs to the names the result line uses; the adder defined
    # above is `_snake_case` (the name `add` never existed in this module).
    first = int(input('''Enter the first number: ''').strip())
    second = int(input('''Enter the second number: ''').strip())
    print(f"""{_snake_case(first, second) = }""")
"""simple docstring"""
from string import ascii_uppercase
_lowercase = {char: i for i, char in enumerate(ascii_uppercase)}
_lowercase = dict(enumerate(ascii_uppercase))
def _snake_case ( message : str , key : str ):
    """Extend *key* by cycling its own characters until it is as long as
    *message*, and return the extended key.

    (The generated signature duplicated the parameter name — a SyntaxError —
    and the loop counters `x` and `i` were never bound.)
    """
    x = len(message )
    i = 0
    while True:
        if x == i:
            # wrap the cycling index back to the start
            i = 0
        if len(key ) == len(message ):
            break
        key += key[i]
        i += 1
    return key
def _snake_case ( message : str , key_new : str ):
    """Encrypt uppercase *message* with the extended key *key_new*:
    each letter becomes ``(message_idx - key_idx) mod 26``; spaces pass through
    and do not consume a key character.

    The alphabet tables are built locally because the module-level tables were
    both bound to the same (clobbered) name and referenced via an undefined
    `dicta`.
    """
    from string import ascii_uppercase

    char_to_num = {char: idx for idx, char in enumerate(ascii_uppercase )}
    num_to_char = dict(enumerate(ascii_uppercase ) )

    cipher_text = ''
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (char_to_num[letter] - char_to_num[key_new[i]]) % 26
            i += 1
            cipher_text += num_to_char[x]
    return cipher_text
def _snake_case ( cipher_text : str , key_new : str ):
    """Decrypt *cipher_text* (inverse of the encryption above):
    each letter becomes ``(cipher_idx + key_idx) mod 26``; spaces pass through
    and do not consume a key character.

    The alphabet tables are built locally because the module-level tables were
    both bound to the same (clobbered) name and referenced via an undefined
    `dicta`.
    """
    from string import ascii_uppercase

    char_to_num = {char: idx for idx, char in enumerate(ascii_uppercase )}
    num_to_char = dict(enumerate(ascii_uppercase ) )

    or_txt = ''
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (char_to_num[letter] + char_to_num[key_new[i]] + 26) % 26
            i += 1
            or_txt += num_to_char[x]
    return or_txt
def _snake_case ( ):
    """Demo: encrypt and decrypt a sample message and print both results.

    The original called `generate_key`/`cipher_text`/`original_text`, names
    that were never bound in this module, so the equivalent logic is inlined
    here to keep the demo self-contained.
    """
    from string import ascii_uppercase

    message = 'THE GERMAN ATTACK'
    key = 'SECRET'
    char_to_num = {char: idx for idx, char in enumerate(ascii_uppercase )}
    num_to_char = dict(enumerate(ascii_uppercase ) )

    # extend the key to the message length by cycling its own characters
    i = 0
    while len(key ) < len(message ):
        key += key[i]
        i += 1

    # encrypt: (message_idx - key_idx) mod 26, spaces pass through
    s = ''
    i = 0
    for letter in message:
        if letter == " ":
            s += " "
        else:
            s += num_to_char[(char_to_num[letter] - char_to_num[key[i]]) % 26]
            i += 1

    # decrypt: (cipher_idx + key_idx + 26) mod 26 — inverse of the above
    recovered = ''
    i = 0
    for letter in s:
        if letter == " ":
            recovered += " "
        else:
            recovered += num_to_char[(char_to_num[letter] + char_to_num[key[i]] + 26) % 26]
            i += 1

    print(f'Encrypted Text = {s}' )
    print(f'Original Text = {recovered}' )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # `main` was never defined in this module; the demo routine is the last
    # definition bound to `_snake_case` above.
    _snake_case()
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class UpperCamelCase_ ( unittest.TestCase ):
    """Unit tests for the `Vector` / `Matrix` helpers imported from `.lib`.

    The generated code gave every test method the same name (so only the last
    one survived discovery) and bound fixtures to a throwaway local while the
    assertions read `x`/`y`/`a`/`b`; both defects are repaired here.
    """

    def test_component(self) -> None:
        x = Vector([1, 2, 3] )
        self.assertEqual(x.component(0 ) , 1 )
        self.assertEqual(x.component(2 ) , 3 )
        # constructing an empty vector must not raise
        _ = Vector()

    def test_str(self) -> None:
        x = Vector([0, 0, 0, 0, 0, 1] )
        self.assertEqual(str(x ) , '(0,0,0,0,0,1)' )

    def test_size(self) -> None:
        x = Vector([1, 2, 3, 4] )
        self.assertEqual(len(x ) , 4 )

    def test_euclidean_length(self) -> None:
        x = Vector([1, 2] )
        y = Vector([1, 2, 3, 4, 5] )
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
        self.assertAlmostEqual(x.euclidean_length() , 2.2_36 , 3 )
        self.assertAlmostEqual(y.euclidean_length() , 7.4_16 , 3 )
        self.assertEqual(z.euclidean_length() , 0 )
        self.assertAlmostEqual(w.euclidean_length() , 7.6_16 , 3 )

    def test_add(self) -> None:
        x = Vector([1, 2, 3] )
        y = Vector([1, 1, 1] )
        self.assertEqual((x + y).component(0 ) , 2 )
        self.assertEqual((x + y).component(1 ) , 3 )
        self.assertEqual((x + y).component(2 ) , 4 )

    def test_sub(self) -> None:
        x = Vector([1, 2, 3] )
        y = Vector([1, 1, 1] )
        self.assertEqual((x - y).component(0 ) , 0 )
        self.assertEqual((x - y).component(1 ) , 1 )
        self.assertEqual((x - y).component(2 ) , 2 )

    def test_mul(self) -> None:
        x = Vector([1, 2, 3] )
        a = Vector([2, -1, 4] )  # for test of dot product
        b = Vector([1, -2, -1] )
        self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' )
        self.assertEqual((a * b) , 0 )

    def test_zero_vector(self) -> None:
        self.assertEqual(str(zero_vector(10 ) ).count('0' ) , 10 )

    def test_unit_basis_vector(self) -> None:
        self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' )

    def test_axpy(self) -> None:
        x = Vector([1, 2, 3] )
        y = Vector([1, 0, 1] )
        self.assertEqual(str(axpy(2 , x , y ) ) , '(3,4,7)' )

    def test_copy(self) -> None:
        x = Vector([1, 0, 0, 0, 0, 0] )
        y = x.copy()
        self.assertEqual(str(x ) , str(y ) )

    def test_change_component(self) -> None:
        x = Vector([1, 0, 0] )
        x.change_component(0 , 0 )
        x.change_component(1 , 1 )
        self.assertEqual(str(x ) , '(0,1,0)' )

    def test_str_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(a ) )

    def test_minor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height() ):
            for y in range(a.width() ):
                self.assertEqual(minors[x][y] , a.minor(x , y ) )

    def test_cofactor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height() ):
            for y in range(a.width() ):
                self.assertEqual(cofactors[x][y] , a.cofactor(x , y ) )

    def test_determinant(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual(-5 , a.determinant() )

    def test__mul__matrix(self) -> None:
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
        x = Vector([1, 2, 3] )
        self.assertEqual('(14,32,50)' , str(a * x ) )
        self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) )

    def test_change_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        a.change_component(0 , 2 , 5 )
        self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(a ) )

    def test_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )

    def test__add__matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
        self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) )

    def test__sub__matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
        self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) )

    def test_square_zero_matrix(self) -> None:
        self.assertEqual(
            '|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 369 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
# Datasets mirrored on the HF Google Cloud Storage bucket; the helper functions
# below read this list under the name `DATASETS_ON_HF_GCP`, which the generated
# code never bound.
DATASETS_ON_HF_GCP = [
    {"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
    {"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
    {"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
    {"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
    {"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
    {"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
    {"""dataset""": """snli""", """config_name""": """plain_text"""},
    {"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
    {"""dataset""": """wiki40b""", """config_name""": """en"""},
    {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
    {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
    {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
    {"""dataset""": """natural_questions""", """config_name""": """default"""},
]
# keep the generated alias for any external references
UpperCAmelCase_ = DATASETS_ON_HF_GCP
def lowerCamelCase__ ( with_config : Dict=True ) -> Dict:
    '''Build `parameterized.named_parameters` entries for the GCP-hosted
    datasets — one per (dataset, config) pair when *with_config* is true,
    otherwise one per unique dataset.

    The parameter is named `with_config` because the decorator below calls it
    by keyword; the generated code renamed the parameter but not its uses.
    NOTE: reads the module-level `DATASETS_ON_HF_GCP` list.
    '''
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list(lowerCamelCase__(with_config=True ) ) )
class UpperCamelCase_ ( TestCase ):
    """For each GCP-hosted (dataset, config) pair, check that its dataset-info
    file can be downloaded from the HF GCS mirror.

    The generated code referenced an undefined base class and parameter-source
    function, and duplicated the test-method parameter names (a SyntaxError).
    """

    # filled in per-case by `parameterized`
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name) -> Union[str, Any]:
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir )
            builder_cls = import_main_class(dataset_module.module_path , dataset=True )
            builder_instance = builder_cls(
                cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
            dataset_info_url = '/'.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False ).replace(os.sep , '/' ),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(dataset_info_url , cache_dir=tmp_dir )
            self.assertTrue(os.path.exists(dataset_info_path ) )
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    """Build the prepared 'wikipedia/20220301.frr' dataset from the HF GCS mirror
    instead of running the (apache-beam based) preparation locally.

    The parameter MUST be named ``tmp_path_factory``: pytest injects fixtures by
    parameter name, so the previous mangled name broke fixture resolution.
    """
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] ) -> Union[str, Any]:
    """Stream the 'wikipedia/20220301.frr' dataset directly from the HF GCS mirror
    (no local preparation) and check a readable 'train' split comes back.
    """
    # NOTE(review): `UpperCamelCase__` is used as a cache directory below, so it is
    # presumably the pytest `tmp_path` fixture — but pytest injects fixtures by
    # parameter name, so under this name the fixture cannot be received; verify.
    # NOTE(review): every assignment below targets `_snake_case` while later lines
    # read `dataset_module`/`builder_cls`/`builder_instance`/`ds` — the assignment
    # targets look mangled and will NameError at runtime; restore original names.
    _snake_case = dataset_module_factory('wikipedia' , cache_dir=UpperCamelCase__ )
    _snake_case = import_main_class(dataset_module.module_path , dataset=UpperCamelCase__ )
    _snake_case = builder_cls(
        cache_dir=UpperCamelCase__ , config_name='20220301.frr' , hash=dataset_module.hash , )
    _snake_case = builder_instance.as_streaming_dataset()
    assert ds
    # NOTE(review): the second isinstance argument must be a type; these look like
    # they should be IterableDatasetDict / IterableDataset — confirm against the
    # file's imports (not visible in this chunk).
    assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
    assert "train" in ds
    assert isinstance(ds['train'] , UpperCamelCase__ )
    assert next(iter(ds['train'] ) )
| 295 | 0 |
# Precomputed sum of squared decimal digits for every 5-digit chunk 0..99999.
# next_number() splits a number into 5-digit chunks and sums these table entries.
# (Renamed: the rest of the module reads `DIGITS_SQUARED`, so the previous
# throwaway binding name left that lookup undefined.)
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]
def next_number(number: int) -> int:
    """Return the sum of the squares of the decimal digits of ``number``.

    Processes five digits per iteration via the precomputed DIGITS_SQUARED table
    (as the original comment notes, this speeds things up slightly).

    Fixes: the previous body read an undefined name while its parameter went
    unused; the def is renamed to the name the rest of the module calls.
    """
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
# CHAINS[i] caches the fate of the chain starting at i + 1:
#   True  -> the chain reaches 1, False -> it reaches 89, None -> unknown yet.
CHAINS: list = [None] * 10000000
CHAINS[0] = True  # number 1: its chain is just {1}
CHAINS[57] = False  # number 58: its chain ends at 89
def chain(number: int) -> bool:
    """Return True if the digit-square chain starting at ``number`` ends at 1,
    False if it ends at 89. Results are memoized in CHAINS.

    Fixes: the previous body bound its results to throwaway locals and then read
    undefined names (`number`, `number_chain`); the def is renamed to the name
    the module's other call sites use.
    """
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    # number * 10**k has the same digits plus trailing zeros, hence the same
    # digit-square sum — cache those entries too while they stay in range.
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number: int = 10_000_000) -> int:
    """Project Euler 92: count how many starting numbers below ``number`` have a
    digit-square chain that arrives at 89 (marked False in CHAINS).

    Fixes: the previous body mixed the mangled parameter name with the original
    `number`, and counted the parameter instead of the False sentinel.
    """
    for i in range(1, number):
        # CHAINS[i] covers the number i + 1; fill any entry not yet memoized.
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): prints the Project Euler 92 answer; `solution` must exist at
    # module level under that exact name for this to run (the defs above are
    # currently mangled) — verify after restoring the names.
    print(f"""{solution() = }""")
| 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
# Shorthand for the integration marker applied to the slower tests below.
UpperCAmelCase__ = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    """Tests for the index-related methods of ``datasets.Dataset``.

    Fixes over the previous version: duplicate lambda parameters (a
    SyntaxError), mock ``.return_value`` assignments discarded into locals,
    the nonexistent ``np.floataa`` dtype, undefined exception/base-class
    names, and a duplicated class name that shadowed a sibling test class.
    """

    def _create_dummy_dataset(self) -> Dataset:
        # 30 rows named my_name-train_0 ... my_name-train_29.
        dset = Dataset.from_dict(
            {"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]}
        )
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        # Attach vector i * ones(5) to row i, so row 29 has the largest inner
        # product with ones(5).
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        # Querying a dropped/unknown index must raise MissingIndex.
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            # Fully mocked client: index creation acknowledges, bulk reports all
            # 30 documents written, search always returns row 29.
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    """Tests for ``datasets.search.FaissIndex``.

    Fixes over the previous version: the nonexistent ``np.floataa`` dtype,
    ``query[1] = 1`` assignments collapsed into rebinding the array, undefined
    ``ValueError``/``custom_index`` names, and a duplicated class name.
    """

    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query: one-hot on component 1 matches the vector added second.
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries: reversed identity should match indices 4..0 in order.
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        # string_factory and custom_index are mutually exclusive.
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    """Round-trip a FaissIndex through an fsspec filesystem (the ``mockfs``
    pytest fixture) and check the reloaded index still answers queries.

    The parameter MUST be named ``mockfs`` — pytest injects fixtures by
    parameter name — and the function needs a ``test_`` name to be collected.
    """
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)

    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    """Tests for ``datasets.search.ElasticSearchIndex`` with a fully mocked
    Elasticsearch client (no server needed).

    Fixes over the previous version: mock ``.return_value`` assignments were
    discarded into a throwaway local, and the query/result locals were mangled.
    """

    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 0 | 1 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    """ConfigTester specialization: verifies SegformerConfig exposes the
    attributes the modeling code depends on.

    Fixes: the built config was assigned to a throwaway local while ``hasattr``
    was called on an undefined name; the class is renamed to match the call
    site in SegformerModelTest, and the base is the imported ConfigTester.
    """

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    """Builds a tiny Segformer config plus dummy inputs and runs shape/loss
    checks on the models (driven by the @require_torch test class below).

    Fixes: every method previously declared duplicate parameter names (a
    SyntaxError) and assigned results to throwaway locals that later lines
    read under their original names; class/method names are restored to match
    the call sites in SegformerModelTest.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random dummy tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # Final feature map is the input downsampled by the last rate times 2.
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        # Binary masks: label values restricted to {0}.
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
    """Common model tests for Segformer (model classes, pipeline mapping,
    attentions/hidden-states shapes, training backward pass, slow loading).

    NOTE(review): heavy name-mangling damage throughout this class —
    * every test method is named `lowercase__`, so later defs shadow earlier
      ones and unittest collects none of them; restore the `test_*` names;
    * results are assigned to `_lowerCAmelCase` while later lines read the
      original local names (`model`, `outputs`, `inputs_dict`, ...) — NameError;
    * `__snake_case` is referenced but never defined;
    * the bases `snake_case_` are undefined (presumably ModelTesterMixin and
      PipelineTesterMixin, both imported at the top of the file) — verify.
    """

    # Model classes / pipeline mapping under test (empty when torch is absent).
    _lowercase: Dict = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    _lowercase: List[str] = (
        {
            '''feature-extraction''': SegformerModel,
            '''image-classification''': SegformerForImageClassification,
            '''image-segmentation''': SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # Common-test feature flags (head masking / embeddings tests etc.).
    _lowercase: int = True
    _lowercase: Union[str, Any] = False
    _lowercase: Dict = False
    _lowercase: Optional[Any] = False

    def lowercase__ ( self : int ) -> int:
        # setUp: build the model tester and the config tester.
        _lowerCAmelCase = SegformerModelTester(self )
        _lowerCAmelCase = SegformerConfigTester(self , config_class=__snake_case )

    def lowercase__ ( self : int ) -> str:
        self.config_tester.run_common_tests()

    def lowercase__ ( self : Any ) -> List[Any]:
        _lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__snake_case )

    def lowercase__ ( self : str ) -> List[Any]:
        _lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*__snake_case )

    def lowercase__ ( self : Tuple ) -> Union[str, Any]:
        _lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*__snake_case )

    @unittest.skip("""SegFormer does not use inputs_embeds""" )
    def lowercase__ ( self : Optional[int] ) -> Optional[int]:
        pass

    @unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" )
    def lowercase__ ( self : Tuple ) -> str:
        pass

    def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
        # Checks the forward signature starts with `pixel_values`.
        _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCAmelCase = model_class(__snake_case )
            _lowerCAmelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowerCAmelCase = [*signature.parameters.keys()]
            _lowerCAmelCase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , __snake_case )

    def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
        # Checks attention outputs: count, first/last layer shapes, ordering.
        _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        _lowerCAmelCase = True
        for model_class in self.all_model_classes:
            _lowerCAmelCase = True
            _lowerCAmelCase = False
            _lowerCAmelCase = True
            _lowerCAmelCase = model_class(__snake_case )
            model.to(__snake_case )
            model.eval()
            with torch.no_grad():
                _lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) )
            _lowerCAmelCase = outputs.attentions
            _lowerCAmelCase = sum(self.model_tester.depths )
            self.assertEqual(len(__snake_case ) , __snake_case )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            _lowerCAmelCase = True
            _lowerCAmelCase = model_class(__snake_case )
            model.to(__snake_case )
            model.eval()
            with torch.no_grad():
                _lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) )
            _lowerCAmelCase = outputs.attentions
            self.assertEqual(len(__snake_case ) , __snake_case )
            # verify the first attentions (first block, first layer)
            _lowerCAmelCase = (self.model_tester.image_size // 4) ** 2
            _lowerCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
            # verify the last attentions (last block, last layer)
            _lowerCAmelCase = (self.model_tester.image_size // 32) ** 2
            _lowerCAmelCase = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
            _lowerCAmelCase = len(__snake_case )
            # Check attention is always last and order is fine
            _lowerCAmelCase = True
            _lowerCAmelCase = True
            _lowerCAmelCase = model_class(__snake_case )
            model.to(__snake_case )
            model.eval()
            with torch.no_grad():
                _lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) )
            self.assertEqual(out_len + 1 , len(__snake_case ) )
            _lowerCAmelCase = outputs.attentions
            self.assertEqual(len(__snake_case ) , __snake_case )
            # verify the first attentions (first block, first layer)
            _lowerCAmelCase = (self.model_tester.image_size // 4) ** 2
            _lowerCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )

    def lowercase__ ( self : Dict ) -> int:
        # NOTE(review): the nested helper below declares three parameters with
        # the same name — a SyntaxError; they were presumably
        # (config, inputs_dict, model_class) — restore before this file can parse.
        def check_hidden_states_output(__snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : str ):
            _lowerCAmelCase = model_class(__snake_case )
            model.to(__snake_case )
            model.eval()
            with torch.no_grad():
                _lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) )
            _lowerCAmelCase = outputs.hidden_states
            _lowerCAmelCase = self.model_tester.num_encoder_blocks
            self.assertEqual(len(__snake_case ) , __snake_case )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )

        _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCAmelCase = True
            check_hidden_states_output(__snake_case , __snake_case , __snake_case )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _lowerCAmelCase = True
            check_hidden_states_output(__snake_case , __snake_case , __snake_case )

    def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
        # Training smoke test: forward + backward on every trainable class.
        if not self.model_tester.is_training:
            return
        _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        _lowerCAmelCase = True
        for model_class in self.all_model_classes:
            if model_class in get_values(__snake_case ):
                continue
            _lowerCAmelCase = model_class(__snake_case )
            model.to(__snake_case )
            model.train()
            _lowerCAmelCase = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
            _lowerCAmelCase = model(**__snake_case ).loss
            loss.backward()

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def lowercase__ ( self : Optional[Any] ) -> str:
        pass

    @slow
    def lowercase__ ( self : Optional[Any] ) -> str:
        # Smoke-test loading the first pretrained checkpoint from the archive list.
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCAmelCase = SegformerModel.from_pretrained(__snake_case )
            self.assertIsNotNone(__snake_case )
def prepare_img():
    """Load the COCO test image used by the integration tests below.

    Fixes: the previous version bound the image to a throwaway local and
    returned the undefined name ``image``; the def is renamed to the name the
    integration tests call (``prepare_img``).
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
    """Slow integration tests against real pretrained Segformer checkpoints:
    logits on ADE/Cityscapes checkpoints plus post-processing output sizes.

    NOTE(review): name-mangling damage — all three methods share the name
    `lowercase__` (later defs shadow earlier ones; unittest collects none),
    results are assigned to `_lowerCAmelCase` while later lines read the
    original names (`model`, `outputs`, `encoded_inputs`, ...), and
    `__snake_case` (used for processor kwargs, `torch_device`, and expected
    tensors) is undefined. Restore the original names before running.
    """

    @slow
    def lowercase__ ( self : Optional[int] ) -> Tuple:
        # only resize + normalize
        _lowerCAmelCase = SegformerImageProcessor(
            image_scale=(5_12, 5_12) , keep_ratio=__snake_case , align=__snake_case , do_random_crop=__snake_case )
        _lowerCAmelCase = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
            __snake_case )
        _lowerCAmelCase = prepare_img()
        _lowerCAmelCase = image_processor(images=__snake_case , return_tensors="""pt""" )
        _lowerCAmelCase = encoded_inputs.pixel_values.to(__snake_case )
        with torch.no_grad():
            _lowerCAmelCase = model(__snake_case )
        _lowerCAmelCase = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
        self.assertEqual(outputs.logits.shape , __snake_case )
        # Reference logits for the top-left 3x3x3 corner of the output.
        _lowerCAmelCase = torch.tensor(
            [
                [[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
                [[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
                [[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
            ] ).to(__snake_case )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __snake_case , atol=1E-4 ) )

    @slow
    def lowercase__ ( self : int ) -> List[Any]:
        # only resize + normalize
        _lowerCAmelCase = SegformerImageProcessor(
            image_scale=(5_12, 5_12) , keep_ratio=__snake_case , align=__snake_case , do_random_crop=__snake_case )
        _lowerCAmelCase = SegformerForSemanticSegmentation.from_pretrained(
            """nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(__snake_case )
        _lowerCAmelCase = prepare_img()
        _lowerCAmelCase = image_processor(images=__snake_case , return_tensors="""pt""" )
        _lowerCAmelCase = encoded_inputs.pixel_values.to(__snake_case )
        with torch.no_grad():
            _lowerCAmelCase = model(__snake_case )
        _lowerCAmelCase = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
        self.assertEqual(outputs.logits.shape , __snake_case )
        _lowerCAmelCase = torch.tensor(
            [
                [[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
                [[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
                [[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
            ] ).to(__snake_case )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __snake_case , atol=1E-1 ) )

    @slow
    def lowercase__ ( self : int ) -> Union[str, Any]:
        # only resize + normalize
        _lowerCAmelCase = SegformerImageProcessor(
            image_scale=(5_12, 5_12) , keep_ratio=__snake_case , align=__snake_case , do_random_crop=__snake_case )
        _lowerCAmelCase = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
            __snake_case )
        _lowerCAmelCase = prepare_img()
        _lowerCAmelCase = image_processor(images=__snake_case , return_tensors="""pt""" )
        _lowerCAmelCase = encoded_inputs.pixel_values.to(__snake_case )
        with torch.no_grad():
            _lowerCAmelCase = model(__snake_case )
        _lowerCAmelCase = outputs.logits.detach().cpu()
        # With target_sizes the segmentation map is upscaled to the given size...
        _lowerCAmelCase = image_processor.post_process_semantic_segmentation(outputs=__snake_case , target_sizes=[(5_00, 3_00)] )
        _lowerCAmelCase = torch.Size((5_00, 3_00) )
        self.assertEqual(segmentation[0].shape , __snake_case )
        # ...and without it, kept at the model's native output resolution.
        _lowerCAmelCase = image_processor.post_process_semantic_segmentation(outputs=__snake_case )
        _lowerCAmelCase = torch.Size((1_28, 1_28) )
        self.assertEqual(segmentation[0].shape , __snake_case )
| 355 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    """Generate the FSMT wmt19 model card for one language pair and write it to
    ``model_card_dir/README.md`` (directories are created as needed).

    Fixes: the def declared three parameters with the same mangled name (a
    SyntaxError), the readme binding was lost, and ``exist_ok``/``f.write``
    were given undefined names. The signature matches the call site at the
    bottom of the file (``src_lang=...``, ``tgt_lang=...`` keywords).
    """
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"

    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
# (this script lives three directories below the repo root)
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    # model_name is e.g. "wmt19-ru-en": prefix "wmt19", source "ru", target "en".
    prefix, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 220 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
logger = logging.get_logger(__name__)

# Map of released checkpoint name -> config URL.
# NOTE: the original code assigned both the logger and this map to the same
# name, so the logger was clobbered; they now have distinct names.
CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class a(PretrainedConfig):
    """
    Configuration for a CvT (Convolutional vision Transformer) model.

    Most arguments are per-stage lists (CvT has three stages).  The original
    definition declared every parameter under the same name, which is a
    SyntaxError (duplicate argument); the parameter names below are restored
    from the attribute assignments in the body.
    """

    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 168 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy import structure: maps submodule name -> list of public symbols.
# The original code stored this dict (and each backend-specific addition)
# in throwaway variables and then passed an undefined `_import_structure`
# to _LazyModule, which raised NameError at import time.
_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}

# Each optional backend only contributes its symbols when available.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends are imported
    # only on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 168 | 1 |
from math import sqrt
def solution(limit: int = 100_0000) -> int:
    """
    Project Euler 86: find the least integer value of M such that the number
    of cuboids (up to M x M x M, measured in integer units) whose shortest
    surface path between opposite corners has integer length exceeds `limit`.

    For a cuboid a <= b <= c, the shortest path is sqrt((a + b)^2 + c^2); for
    each candidate longest side `max_cuboid_size` we scan every possible
    a + b and count how many (a, b) splits are valid.

    Args:
        limit: number of integer-path cuboids that must be exceeded.

    Returns:
        The smallest maximum cuboid side M whose cumulative count > limit.
    """
    # NOTE: the original body assigned mangled names (UpperCAmelCase_) but
    # read `num_cuboids` / `max_cuboid_size` / `limit`, raising NameError.
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                # Count the (a, b) splits of sum_shortest_sides with
                # a <= b <= max_cuboid_size.
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
# Script entry point: print the answer for the default limit.
if __name__ == "__main__":
    print(f"{solution() = }")
| 344 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
# Training requires at least one deep-learning backend.
if not is_tf_available() and not is_torch_available():
    raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")

# TF training parameters
# NOTE: the original assigned both flags to the same name, clobbering the first.
USE_XLA = False
USE_AMP = False
def A ( __UpperCAmelCase ) -> Any:
    """
    Factory used by argparse's ``set_defaults(func=...)``: build the train
    command from the parsed CLI namespace.

    NOTE(review): this returns ``TrainCommand``; confirm the command class in
    this file is actually bound to that name.
    """
    return TrainCommand(__UpperCAmelCase )
class TrainCommand(BaseTransformersCLICommand):
    """
    `transformers-cli train`: fine-tune a pipeline on a tab-separated CSV
    dataset.  (Restored from the mangled original: the class name is the one
    the factory above returns, the base class is the imported CLI base, the
    sub-parser is bound to `train_parser`, and the four methods — previously
    all named `__a`, clobbering each other — get their distinct names back.)
    """

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register the 'train' sub-command and its arguments.

        Args:
            parser: Root ArgumentParser of the transformers CLI.
        """
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        # `A` is this module's command factory; argparse calls it with the
        # parsed namespace when the 'train' sub-command is selected.
        train_parser.set_defaults(func=A)

    def __init__(self, args: Namespace):
        """Load pipeline and datasets according to the parsed CLI arguments."""
        self.logger = logging.get_logger("transformers-cli/training")
        # Prefer TF when it is installed, otherwise fall back to PyTorch.
        self.framework = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output, exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        """Dispatch to the framework-specific training loop."""
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        # PyTorch training is not implemented for this command.
        raise NotImplementedError

    def run_tf(self):
        """Fit the pipeline on the training data, then save it to `output`."""
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
| 344 | 1 |
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    """
    ConfigTester specialization that also checks Levit-specific attributes.
    (Original had an undefined base name and bound the config to one local
    name while reading another, raising NameError.)
    """

    def create_and_test_config_common_properties(self):
        # Instantiate the config under test and verify Levit's common props.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
    """
    Builds small Levit configs/inputs for the model tests.  The class is named
    `LevitModelTester` because the test class instantiates it by that name;
    the five helper methods (previously all named `snake_case`, so only the
    last survived) get back the names their callers use.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[1_28, 2_56, 3_84],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.0_2,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # Down-sampling ops between stages: [op, key_dim, num_heads, attn_ratio, mlp_ratio, stride]
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny random batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # Four conv downsampling layers in the patch embedding.
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Common model tests for Levit.  Restored from the mangled original: the
    mixin bases were undefined names, every class attribute shared one name
    (so only the last assignment survived), and every test method was named
    `snake_case` (so unittest could collect at most one).
    """

    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    # Levit does not support these common-test features.
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            # Four conv downsampling layers before the first stage.
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                # The teacher variant is inference-only and takes no labels.
                del inputs_dict["labels"]
        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]
                    model = model_class(config)
                    model.to(torch_device)
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])
                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])
                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )
                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO test fixture image used by the slow integration test.

    Renamed from the mangled `_lowerCamelCase`: the integration test below
    calls `prepare_img()`.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    """
    Slow end-to-end check against a released Levit checkpoint.  The cached
    property was originally named `snake_case` while the test read
    `self.default_image_processor`, raising AttributeError.
    """

    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 10_00))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 57 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
# Shared fixtures: a small BGR test image and its grayscale version.
# NOTE: both globals were originally assigned to the same name `A` (the
# second clobbered the first) while the code below read `img`/`gray`.
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


# All functions below were named `_lowerCamelCase`, so each definition
# shadowed the previous one and pytest collected none of them; they are
# restored as distinct `test_*` functions.
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uinta)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
| 57 | 1 |
'''simple docstring'''
def jaro_winkler(str_1: str, str_2: str) -> float:
    """
    Compute the Jaro-Winkler similarity of two strings (1.0 = identical,
    0.0 = no similarity).

    Restored from the mangled original, whose duplicate parameter names were
    a SyntaxError and whose body read undefined `_stra`/`stra`; the function
    is named `jaro_winkler` because the `__main__` block calls it by that name.

    >>> jaro_winkler("hello", "hello")
    1.0
    >>> jaro_winkler("martha", "marhta")
    0.9611111111111111
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        # Characters of _str1 that match _str2 within the Jaro window.
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, char in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if char in _str2[left:right]:
                matched.append(char)
                # Blank out the consumed character so it can't match twice.
                _str2 = f"{_str2[0:_str2.index(char)]} {_str2[_str2.index(char) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str_1, str_2)
    matching_2 = get_matched_characters(str_2, str_1)
    match_count = len(matching_1)

    # transposition: half the number of matched characters out of order
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str_1)
                + match_count / len(str_2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters (the Winkler bonus)
    prefix_len = 0
    for c1, c2 in zip(str_1[:4], str_2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
# Run doctests and print an example similarity when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler('''hello''', '''world'''))
| 174 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration file.
logger = logging.get_logger(__name__)

# Map of released checkpoint name -> config URL.
# NOTE: the original assigned both the logger and this map to the same name,
# so the logger was clobbered; they now have distinct names.
LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    """
    Configuration for a LeViT model.  Restored from the mangled original:
    every `__init__` parameter shared one name (a duplicate-argument
    SyntaxError), the base class name was undefined (`PretrainedConfig` is
    imported above), and this class was shadowed by the ONNX config that
    reused its name.
    """

    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # Down-sampling ops between stages: [op, key_dim, num_heads, attn_ratio, mlp_ratio, stride]
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
    """
    ONNX export configuration for LeViT.  The two properties were originally
    both named `lowerCAmelCase` (the second clobbered the first) and the base
    class name was undefined (`OnnxConfig` is imported above).
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single pixel_values input with dynamic batch/channel/spatial axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1e-4
| 174 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module-level logger for this configuration file.
lowercase_ = logging.get_logger(__name__)
class __lowerCAmelCase(BackboneConfigMixin, PretrainedConfig):
    """
    Configuration for a MaskFormer-Swin backbone.  Restored from the mangled
    original: the two base-class names were undefined (both mixins are
    imported above), `model_type` and `attribute_map` were both bound to the
    same class attribute name (clobbering the first), and every `__init__`
    parameter shared one name (a duplicate-argument SyntaxError).
    """

    model_type = "maskformer-swin"

    # PretrainedConfig attribute aliases.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 205 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
# Module logger; the conversion routine below reports progress through this
# name (it was previously bound to an obfuscated identifier, leaving
# `logger` undefined at runtime).
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def lowerCamelCase__(prophetnet_checkpoint_path, pytorch_dump_folder_path) -> None:
    """Convert an old-structure (XLM)ProphetNet checkpoint to the current layout.

    Loads the checkpoint with both the legacy ``transformers_old`` class and
    the current class, then copies every weight reported under
    ``loading_info["missing_keys"]`` from the old module tree into the new one.

    Args:
        prophetnet_checkpoint_path: path of the legacy checkpoint; paths
            containing ``"xprophetnet"`` are treated as XLM-ProphetNet.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.

    Raises:
        ValueError: if a missing key cannot be located on the old model.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    # These projections live fused inside `in_proj_weight` on the old model
    # and must be sliced apart (query / key / value thirds).
    special_keys = ['key_proj', 'value_proj', 'query_proj']

    # new attribute name -> old attribute name ("" means: same container).
    mapping = {
        'self_attn': 'ngram_self_attn',
        'cross_attn': 'encoder_attn',
        'cross_attn_layer_norm': 'encoder_attn_layer_norm',
        'feed_forward_layer_norm': 'final_layer_norm',
        'feed_forward': '',
        'intermediate': 'fc1',
        'output': 'fc2',
        'key_proj': 'k_proj',
        'query_proj': 'q_proj',
        'value_proj': 'v_proj',
        'word_embeddings': 'embed_tokens',
        'embeddings_layer_norm': 'emb_layer_norm',
        'relative_pos_embeddings': 'relative_linear',
        'ngram_embeddings': 'ngram_input_embed',
        'position_embeddings': 'embed_positions',
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split('.')

        # The lm_head hangs off the top-level wrapper; everything else lives
        # on the inner model.
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"""{attribute} is initialized.""")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"""{attribute} is initialized""")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, '''in_proj_weight'''):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # These two checks were previously bare comparison expressions
                # (no-ops); they are real assertions now.
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
                model.position_embeddings = nn.Parameter(old_model.embed_positions.weight[:5_12, :])
                is_key_init = True
                break

            # Descend one level on both module trees.
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"""{old_model} does not have {old_attribute}""")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"""{key} was not correctly initialized!""")

    print(f"""Saving model to {pytorch_dump_folder_path}""")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: previously `parser`/`args` were never bound (the
    # assignments used obfuscated names) and the call targeted an undefined
    # function name; it now dispatches to the converter defined above.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    lowerCamelCase__(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 121 | 0 |
def UpperCAmelCase__(number: int, position: int) -> int:
    """Return *number* with the bit at *position* forced to 1.

    Parameter names are restored from the body (the previous signature
    declared the same name twice — a SyntaxError).
    """
    return number | (1 << position)
def UpperCAmelCase__(number: int, position: int) -> int:
    """Return *number* with the bit at *position* forced to 0.

    Parameter names are restored from the body (the previous signature
    declared the same name twice — a SyntaxError).
    """
    return number & ~(1 << position)
def UpperCAmelCase__(number: int, position: int) -> int:
    """Return *number* with the bit at *position* flipped.

    Parameter names are restored from the body (the previous signature
    declared the same name twice — a SyntaxError).
    """
    return number ^ (1 << position)
def UpperCAmelCase__(number: int, position: int) -> bool:
    """Return True iff the bit of *number* at *position* is set.

    Parameter names are restored from the body (the previous signature
    declared the same name twice — a SyntaxError).
    """
    return ((number >> position) & 1) == 1
def UpperCAmelCase__(number: int, position: int) -> int:
    """Return the bit (0 or 1) of *number* at *position*.

    Parameter names are restored from the body (the previous signature
    declared the same name twice — a SyntaxError).
    """
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 356 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class A__ ( _snake_case , unittest.TestCase ):
    '''Tokenizer test-suite for Reformer (slow SentencePiece + fast Rust variants).

    NOTE(review): the obfuscation bound every class attribute to `lowercase`
    and every test method to `snake_case_`, so later bindings shadow earlier
    ones, and locals assigned to `A_` no longer match the identifiers the
    bodies read (`tokenizer`, `vocab_keys`, ...). Code left byte-identical;
    documentation only.
    '''
    # Intended (per the mixin's conventions): tokenizer_class, rust_tokenizer_class,
    # test_rust_tokenizer, test_sentencepiece-style flags — all collapsed onto one name.
    lowercase = ReformerTokenizer
    lowercase = ReformerTokenizerFast
    lowercase = True
    lowercase = False
    lowercase = True
    def snake_case_ ( self ) -> str:
        '''Set up: build a slow tokenizer from the fixture model and cache it on disk.'''
        super().setUp()
        A_ = ReformerTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
        tokenizer.save_pretrained(self.tmpdirname )
    def snake_case_ ( self ) -> Union[str, Any]:
        '''Round-trip a single token through id conversion in both directions.'''
        A_ = """<s>"""
        A_ = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
    def snake_case_ ( self ) -> Tuple:
        '''Spot-check the fixture vocabulary: first/second/last entries and size.'''
        A_ = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<unk>""" )
        self.assertEqual(vocab_keys[1] , """<s>""" )
        self.assertEqual(vocab_keys[-1] , """j""" )
        self.assertEqual(len(UpperCamelCase__ ) , 1000 )
    def snake_case_ ( self ) -> Dict:
        '''The fixture model exposes a vocab of exactly 1000 entries.'''
        self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
    def snake_case_ ( self ) -> Union[str, Any]:
        '''Slow and fast tokenizers must agree on tokenization and encoding.'''
        if not self.test_rust_tokenizer:
            return
        A_ = self.get_tokenizer()
        A_ = self.get_rust_tokenizer()
        A_ = """I was born in 92000, and this is falsé."""
        A_ = tokenizer.tokenize(UpperCamelCase__ )
        A_ = rust_tokenizer.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        A_ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
        A_ = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        A_ = self.get_rust_tokenizer()
        A_ = tokenizer.encode(UpperCamelCase__ )
        A_ = rust_tokenizer.encode(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
    def snake_case_ ( self , UpperCamelCase__=15 ) -> int:
        '''Padding to max_length without a pad token must raise, for every input shape.'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                A_ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
                # Simple input
                A_ = """This is a simple input"""
                A_ = ["""This is a simple input 1""", """This is a simple input 2"""]
                A_ = ("""This is a simple input""", """This is a pair""")
                A_ = [
                    ("""This is a simple input 1""", """This is a simple input 2"""),
                    ("""This is a simple pair 1""", """This is a simple pair 2"""),
                ]
                # Simple input tests
                self.assertRaises(UpperCamelCase__ , tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""" )
                # Simple input
                self.assertRaises(UpperCamelCase__ , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""" )
                # Simple input
                self.assertRaises(
                    UpperCamelCase__ , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""" , )
                # Pair input
                self.assertRaises(UpperCamelCase__ , tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""" )
                # Pair input
                self.assertRaises(UpperCamelCase__ , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""" )
                # Pair input
                self.assertRaises(
                    UpperCamelCase__ , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""" , )
    def snake_case_ ( self ) -> Optional[Any]:
        '''Intentionally skipped for this tokenizer (mixin hook left empty).'''
        pass
    def snake_case_ ( self ) -> Union[str, Any]:
        '''Full tokenization check: pieces, ids, and id->token round trip (with <unk>).'''
        A_ = ReformerTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
        A_ = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(UpperCamelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [285, 46, 10, 170, 382] , )
        A_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            UpperCamelCase__ , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """9""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """é""",
                """.""",
            ] , )
        A_ = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
        self.assertListEqual(
            UpperCamelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        A_ = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
        self.assertListEqual(
            UpperCamelCase__ , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """<unk>""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """<unk>""",
                """.""",
            ] , )
    @cached_property
    def snake_case_ ( self ) -> Optional[int]:
        '''Lazily download the full pretrained tokenizer used by the slow tests.'''
        return ReformerTokenizer.from_pretrained("""google/reformer-crime-and-punishment""" )
    @slow
    def snake_case_ ( self ) -> Dict:
        '''Pinned encoding of a short sentence against the pretrained vocab.'''
        A_ = """Hello World!"""
        A_ = [126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) )
    @slow
    def snake_case_ ( self ) -> Any:
        '''Pinned encoding of a long sentence with characters that map to <unk> (id 0).'''
        A_ = (
            """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
            """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
        )
        A_ = [
            108,
            265,
            24,
            111,
            4,
            258,
            156,
            35,
            28,
            275,
            3,
            259,
            297,
            260,
            84,
            4,
            35,
            110,
            44,
            8,
            259,
            91,
            268,
            21,
            11,
            209,
            274,
            109,
            266,
            277,
            117,
            86,
            93,
            315,
            258,
            278,
            258,
            277,
            258,
            0,
            258,
            288,
            258,
            319,
            258,
            0,
            258,
            0,
            258,
            0,
            258,
            0,
            258,
            287,
            258,
            315,
            258,
            289,
            258,
            278,
            99,
            269,
            266,
            262,
            8,
            259,
            241,
            4,
            217,
            230,
            268,
            266,
            55,
            168,
            106,
            75,
            193,
            266,
            223,
            27,
            49,
            26,
            282,
            25,
            264,
            299,
            19,
            26,
            0,
            258,
            277,
            117,
            86,
            93,
            176,
            183,
            270,
            11,
            262,
            42,
            61,
            265,
        ]
        self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) )
    @require_torch
    @slow
    def snake_case_ ( self ) -> str:
        '''Smoke test: encoded batches from the tokenizer feed a ReformerModel forward pass.'''
        import torch
        from transformers import ReformerConfig, ReformerModel
        # Build sequence
        A_ = list(self.big_tokenizer.get_vocab().keys() )[:10]
        A_ = """ """.join(UpperCamelCase__ )
        A_ = self.big_tokenizer.encode_plus(UpperCamelCase__ , return_tensors="""pt""" )
        A_ = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="""pt""" )
        A_ = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        A_ = encoded_sequence["""input_ids"""].shape
        A_ = ReformerModel(UpperCamelCase__ )
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**UpperCamelCase__ )
            model(**UpperCamelCase__ )
    @slow
    def snake_case_ ( self ) -> Optional[int]:
        '''Integration test against a pinned model revision with frozen expected ids/masks.'''
        # fmt: off
        A_ = {"""input_ids""": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        A_ = [
            """This is a very simple sentence.""",
            """The quick brown fox jumps over the lazy dog.""",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase__ , model_name="""google/reformer-crime-and-punishment""" , revision="""0e6c3decb8211d49bf881013425dc8b0448b3f5a""" , padding=UpperCamelCase__ , sequences=UpperCamelCase__ , )
| 101 | 0 |
def UpperCamelCase(n: int, r: int) -> int:
    """Compute the binomial coefficient C(n, r) via Pascal's triangle.

    Builds a single rolling row of size r+1 in O(n*r) time and O(r) space.
    The parameter names are restored from the body (the previous signature
    declared the same name twice — a SyntaxError). Returns 0 when r > n.
    """
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row, updating right-to-left
        # so each entry still sees the previous row's values.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
# Demo call: the function above is (obfuscated-)named `UpperCamelCase`;
# the previous call targeted the undefined name `binomial_coefficient`.
print(UpperCamelCase(10, 5))
| 140 |
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def UpperCamelCase_(snake_case: Optional[int]):
    """argparse ``func=`` factory: ignore the parsed namespace and build the command.

    Returns an instance of the environment command class defined below
    (obfuscated name ``_snake_case``); the previous body referenced the
    undefined name ``EnvironmentCommand``.
    """
    return _snake_case()
class _snake_case ( lowercase_ ):
    """`diffusers-cli env` command: collects and prints environment/debug info.

    The previous version bound all three methods to one obfuscated name (so
    only the last survived), referenced `download_parser` without defining
    it, and called the missing `self.format_dict`; the conventional
    register_subcommand / run / format_dict hooks are restored.
    """

    @staticmethod
    def register_subcommand(parser) -> None:
        # Register the `env` sub-command and route it to the module-level
        # factory so the CLI dispatcher can instantiate this command.
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=UpperCamelCase_)

    def run(self):
        """Gather versions of diffusers' companion libraries, print an issue-ready report, return the dict."""
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": F'{pt_version} ({pt_cuda_available})',
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d) -> str:
        """Render a dict as `- key: value` lines for GitHub issue templates."""
        return "\n".join([F'- {prop}: {val}' for prop, val in d.items()]) + "\n"
| 85 | 0 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class lowercase:
    """Binary-tree node: a float payload plus optional left/right children.

    Field names are restored from the sibling validator, which reads
    ``node.data`` / ``node.left`` / ``node.right``; the previous version
    annotated one name three times, collapsing the record to a single field.
    (Annotations stay lazy via the module's ``from __future__ import
    annotations``.)
    """

    data: float
    left: lowercase | None = None
    right: lowercase | None = None
def A(node) -> bool:
    """Return True iff the tree rooted at ``node`` is a valid binary search tree.

    Every node must be an instance of the ``lowercase`` dataclass with a
    float-convertible ``data`` payload; otherwise a ValueError is raised.
    The previous version had a triplicated parameter name in the inner
    recursive check (a SyntaxError) and an ``isinstance(node, node)`` call;
    identifiers are restored from the names the bodies actually read.
    """

    def is_valid_tree(node) -> bool:
        # Structural sanity: every non-None node is a tree node whose
        # payload can be interpreted as a float.
        if node is None:
            return True
        if not isinstance(node, lowercase):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(node):
        raise ValueError(
            'Each node should be type of TreeNode and data should be float.')

    def is_binary_search_tree_recursive_check(
        node, left_bound, right_bound) -> bool:
        # BST invariant: each payload lies strictly inside the exclusive
        # bounds inherited from its ancestors.
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(node, -float('inf'), float('inf'))
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 351 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class lowercase ( unittest.TestCase ):
    """Test-suite for OwlViTProcessor (CLIP tokenizer + OwlViT image processor).

    NOTE(review): the obfuscation bound every method to `__UpperCamelCase`
    (later defs shadow earlier ones) and every local to `UpperCamelCase`,
    so locals like `tokenizer`, `image_processor`, `processor` referenced in
    bodies no longer match their assignments. Code left byte-identical;
    documentation only.
    """
    def __UpperCamelCase ( self ) -> List[str]:
        """Set up: write a tiny CLIP vocab/merges and an image-processor config to a tmp dir."""
        UpperCamelCase = tempfile.mkdtemp()
        # fmt: off
        UpperCamelCase = ['', 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        UpperCamelCase = dict(zip(A_ , range(len(A_ ) ) ) )
        UpperCamelCase = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        UpperCamelCase = {'unk_token': '<unk>'}
        UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(A_ ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(A_ ) )
        UpperCamelCase = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073],
            'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711],
        }
        UpperCamelCase = os.path.join(self.tmpdirname , A_ )
        with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
            json.dump(A_ , A_ )
    def __UpperCamelCase ( self , **A_ ) -> Union[str, Any]:
        """Build a slow CLIP tokenizer from the tmp fixture."""
        return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='!' , **A_ )
    def __UpperCamelCase ( self , **A_ ) -> Tuple:
        """Build a fast CLIP tokenizer from the tmp fixture."""
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='!' , **A_ )
    def __UpperCamelCase ( self , **A_ ) -> Union[str, Any]:
        """Build an OwlViT image processor from the tmp fixture."""
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **A_ )
    def __UpperCamelCase ( self ) -> List[str]:
        """Tear down: remove the tmp fixture directory."""
        shutil.rmtree(self.tmpdirname )
    def __UpperCamelCase ( self ) -> List[Any]:
        """Create one random RGB PIL image as processor input."""
        UpperCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        UpperCamelCase = [Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def __UpperCamelCase ( self ) -> Optional[Any]:
        """save_pretrained/from_pretrained round trip for slow and fast processors."""
        UpperCamelCase = self.get_tokenizer()
        UpperCamelCase = self.get_rust_tokenizer()
        UpperCamelCase = self.get_image_processor()
        UpperCamelCase = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
        processor_slow.save_pretrained(self.tmpdirname )
        UpperCamelCase = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=A_ )
        UpperCamelCase = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
        processor_fast.save_pretrained(self.tmpdirname )
        UpperCamelCase = OwlViTProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , A_ )
        self.assertIsInstance(processor_fast.tokenizer , A_ )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , A_ )
        self.assertIsInstance(processor_fast.image_processor , A_ )
    def __UpperCamelCase ( self ) -> List[Any]:
        """from_pretrained with overriding kwargs picks up new special tokens / flags."""
        UpperCamelCase = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        UpperCamelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        UpperCamelCase = self.get_image_processor(do_normalize=A_ )
        UpperCamelCase = OwlViTProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=A_ )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , A_ )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , A_ )
    def __UpperCamelCase ( self ) -> Optional[Any]:
        """Processor(images=...) matches the bare image processor output numerically."""
        UpperCamelCase = self.get_image_processor()
        UpperCamelCase = self.get_tokenizer()
        UpperCamelCase = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
        UpperCamelCase = self.prepare_image_inputs()
        UpperCamelCase = image_processor(A_ , return_tensors='np' )
        UpperCamelCase = processor(images=A_ , return_tensors='np' )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def __UpperCamelCase ( self ) -> str:
        """Processor(text=...) matches the bare tokenizer output token-for-token."""
        UpperCamelCase = self.get_image_processor()
        UpperCamelCase = self.get_tokenizer()
        UpperCamelCase = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
        UpperCamelCase = 'lower newer'
        UpperCamelCase = processor(text=A_ , return_tensors='np' )
        UpperCamelCase = tokenizer(A_ , return_tensors='np' )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
    def __UpperCamelCase ( self ) -> int:
        """Joint text+image call returns all expected keys; empty call raises."""
        UpperCamelCase = self.get_image_processor()
        UpperCamelCase = self.get_tokenizer()
        UpperCamelCase = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
        UpperCamelCase = 'lower newer'
        UpperCamelCase = self.prepare_image_inputs()
        UpperCamelCase = processor(text=A_ , images=A_ )
        self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
        # test if it raises when no input is passed
        with pytest.raises(A_ ):
            processor()
    def __UpperCamelCase ( self ) -> Union[str, Any]:
        """Flat list of text queries is padded into a (num_queries, seq_len) batch."""
        UpperCamelCase = 'google/owlvit-base-patch32'
        UpperCamelCase = OwlViTProcessor.from_pretrained(A_ )
        UpperCamelCase = ['cat', 'nasa badge']
        UpperCamelCase = processor(text=A_ )
        UpperCamelCase = 16
        self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
        self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
        # test if it raises when no input is passed
        with pytest.raises(A_ ):
            processor()
    def __UpperCamelCase ( self ) -> Union[str, Any]:
        """Nested per-image query lists are flattened to batch * max_queries rows."""
        UpperCamelCase = 'google/owlvit-base-patch32'
        UpperCamelCase = OwlViTProcessor.from_pretrained(A_ )
        UpperCamelCase = [['cat', 'nasa badge'], ['person']]
        UpperCamelCase = processor(text=A_ )
        UpperCamelCase = 16
        UpperCamelCase = len(A_ )
        UpperCamelCase = max([len(A_ ) for texts in input_texts] )
        self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
        self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length) )
        # test if it raises when no input is passed
        with pytest.raises(A_ ):
            processor()
    def __UpperCamelCase ( self ) -> Dict:
        """Pinned token ids for two known queries against the released checkpoint."""
        UpperCamelCase = 'google/owlvit-base-patch32'
        UpperCamelCase = OwlViTProcessor.from_pretrained(A_ )
        UpperCamelCase = ['cat', 'nasa badge']
        UpperCamelCase = processor(text=A_ )
        UpperCamelCase = 16
        UpperCamelCase = inputs['input_ids']
        UpperCamelCase = [
            [49_406, 2_368, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49_406, 6_841, 11_301, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
        self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
        self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
        self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
        self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
    def __UpperCamelCase ( self ) -> Optional[Any]:
        """Image-guided mode: query_images + images yield the two pixel-value keys."""
        UpperCamelCase = self.get_image_processor()
        UpperCamelCase = self.get_tokenizer()
        UpperCamelCase = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
        UpperCamelCase = self.prepare_image_inputs()
        UpperCamelCase = self.prepare_image_inputs()
        UpperCamelCase = processor(images=A_ , query_images=A_ )
        self.assertListEqual(list(inputs.keys() ) , ['query_pixel_values', 'pixel_values'] )
        # test if it raises when no input is passed
        with pytest.raises(A_ ):
            processor()
    def __UpperCamelCase ( self ) -> int:
        """Processor.batch_decode delegates to the tokenizer's batch_decode."""
        UpperCamelCase = self.get_image_processor()
        UpperCamelCase = self.get_tokenizer()
        UpperCamelCase = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
        UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        UpperCamelCase = processor.batch_decode(A_ )
        UpperCamelCase = tokenizer.batch_decode(A_ )
        self.assertListEqual(A_ , A_ )
| 110 | 0 |
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCamelCase__ = """\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"""
lowerCamelCase__ = """\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"""
lowerCamelCase__ = """\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... 
\'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... 
\'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class A__ ( datasets.Metric):
    """Google BLEU (GLEU) metric, backed by NLTK's ``gleu_score.corpus_gleu``."""

    def _info(self):
        # `_info` / `_compute` are the hook names the datasets.Metric framework
        # invokes; the original used placeholder method names the framework
        # would never call, and `_compute` had duplicate parameter names.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(self, predictions, references, min_len=1, max_len=4):
        """Corpus-level Google BLEU over tokenized hypotheses and references.

        Args:
            predictions: list of tokenized hypotheses.
            references: list of lists of tokenized reference translations.
            min_len / max_len: n-gram order bounds passed through to NLTK.
        """
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references,
                hypotheses=predictions,
                min_len=min_len,
                max_len=max_len,
            )
        }
def a ( snake_case__: int = 100 ):
    """Project Euler 6: difference between the square of the sum and the sum
    of the squares of the first *snake_case__* natural numbers.
    """
    # (n(n+1)/2)^2 is the square of the triangular number 1+2+...+n.
    triangular = snake_case__ * (snake_case__ + 1) // 2
    # n(n+1)(2n+1)/6 is the closed form for 1^2 + 2^2 + ... + n^2.
    square_pyramidal = snake_case__ * (snake_case__ + 1) * (2 * snake_case__ + 1) // 6
    return triangular * triangular - square_pyramidal
if __name__ == "__main__":
    # The solver above is named `a`; the original called a nonexistent
    # `solution()` and crashed with NameError when run as a script.
    print(f"{a() = }")
| 30 | 0 |
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE : int ):
    """Return True iff the argument is 9-pandigital: exactly nine decimal
    digits, using each of 1..9 once."""
    digits = str(SCREAMING_SNAKE_CASE)
    return len(digits) == 9 and set(digits) == set("123456789")
def _a ( ):
    """Project Euler 38: largest 1-9 pandigital concatenated product.

    Only two shapes can win: 100002 * n concatenates a 4-digit base with its
    double, and 1002003 * n concatenates a 3-digit base with its double and
    triple.  Searches each range from the top down; returns None if nothing
    matches.
    """

    def _is_9_pandigital(number: int) -> bool:
        # Local check (nine digits, each of 1..9 once): the original called a
        # module-level `is_9_pandigital` that does not exist under that name.
        digits = str(number)
        return len(digits) == 9 and set(digits) == set("123456789")

    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if _is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if _is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
    # The solver above is `_a`; the original printed a nonexistent `solution()`.
    print(f"{_a() = }")
| 51 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __magic_name__ ( AbstractDatasetReader ):
    """Reader that loads text files into a Dataset (or an IterableDataset when
    streaming).  Parameter and method names reconstructed from the upstream
    `datasets` TextDatasetReader; the original had duplicate placeholder
    parameter names (a SyntaxError) and referenced undefined locals.
    """

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Normalise a bare path (or list of paths) into a {split: paths} dict.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        """Materialise the dataset (streaming or map-style) and return it."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
| 51 | 1 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def _snake_case(t_limit: int = 1000000, n_limit: int = 10) -> int:
    """Project Euler 174: count tile totals t <= t_limit that can form between
    1 and n_limit distinct hollow square laminae.

    A lamina with outer width o and square hole of width h (same parity,
    1 <= h <= o-2) uses o^2 - h^2 tiles.  The original signature reused one
    placeholder name for both parameters (a SyntaxError), seeded the
    defaultdict with a non-callable, and ignored the second parameter.
    """
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            # Smallest hole keeping the tile count within t_limit.
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # The hole width must share the outer width's parity.
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
    # The solver above is `_snake_case`; the original called a nonexistent
    # `solution()`.
    print(f"{_snake_case() = }")
| 60 |
"""simple docstring"""
import numpy as np
def _snake_case(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid, 1 / (1 + exp(-x)).

    The original body referenced `vector` while its parameter carried a
    placeholder name, so every call raised NameError; the parameter name is
    restored here.
    """
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 60 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)

# NOTE(review): this rebinds the same name and clobbers the logger above —
# upstream keeps distinct `logger` and config-archive-map names; confirm intent.
# Map from model identifier to its hosted config URL.
SCREAMING_SNAKE_CASE : List[str] = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}
class UpperCamelCase ( lowercase__ ):
    """Configuration for a GLPN (Global-Local Path Networks) model.

    Defaults reproduce the vinvino02/glpn-kitti architecture.  Parameter names
    are reconstructed from the attribute assignments; the original signature
    reused one placeholder name for every argument (a SyntaxError) and lost
    the `model_type` attribute the Auto* registry relies on.
    """

    # Registry key used by AutoConfig; was assigned to a placeholder name.
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        # NOTE(review): the [1, 2, 5, 8] / [4, 4, 4, 4] defaults are mapped to
        # num_attention_heads / mlp_ratios per the upstream GLPN config — confirm.
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
| 356 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    """Encode a "read key *k*" operation as (callable, args...) for _run_operation.

    Renamed back to `_get`: the operation lists below call it by this name.
    """
    return getitem, k
def _set(k, v):
    """Encode a "store *v* under key *k*" operation for _run_operation.

    Renamed back to `_set`: the operation lists below call it by this name.
    """
    return setitem, k, v
def _del(k):
    """Encode a "delete key *k*" operation for _run_operation.

    Renamed back to `_del`: the operation lists below call it by this name.
    """
    return delitem, k
def _run_operation(obj, fun, *args):
    """Apply ``fun(obj, *args)`` and normalise the outcome.

    Returns (result, None) on success or (None, exception) on failure, so a
    HashMap run can be compared against a plain dict run.  Renamed back to
    `_run_operation`, which is the name the equivalence test uses.
    """
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
# Operation scripts exercised by the parametrised dict-equivalence test below.
# The originals were all assigned to one rebinding placeholder name while the
# @pytest.mark.parametrize decorator referenced these identifiers.
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]
@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    """Replay *operations* on both a HashMap and a dict; every observable
    piece of state must match after each step.

    The parametrize name must match the function's parameter name, and the
    original ran each operation twice on unnamed targets; this runs it once on
    the HashMap and once on the reference dict.
    """
    my = HashMap(initial_block_size=4)
    py = {}
    for fun, *args in operations:
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        # Exceptions compare by identity, so compare their string forms.
        assert str(my_exc) == str(py_exc)
        assert set(py) == set(my)
        assert len(py) == len(my)
        assert set(my.items()) == set(py.items())
def test_no_new_methods():
    """HashMap's public surface must be a strict subset of dict's.

    Renamed with a ``test_`` prefix so pytest collects it (the original reused
    the previous test's placeholder name, shadowing it).
    """

    def is_public(name: str) -> bool:
        # The original declared a placeholder parameter but read `name`.
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
| 252 | 0 |
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    """CLI arguments selecting the encoder/decoder checkpoints and output dir.

    Renamed to ``ModelArguments`` because ``main`` below constructs
    ``HfArgumentParser((ModelArguments,))``.  The original attributes lacked
    type annotations (so ``@dataclass`` ignored them) and defaulted to an
    undefined name; annotations and ``None`` defaults are restored.
    """

    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization."
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization."
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )
def a__ ( ) -> None:
    """Assemble a FlaxVisionEncoderDecoderModel from two pretrained checkpoints
    and save the model, image processor and tokenizer to the output directory.
    """
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    # NOTE(review): the original assigned two bare True values with no target;
    # upstream sets these two decoder-config flags — confirm.
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    # NOTE(review): the original computed this token without attaching it;
    # upstream assigns it as the tokenizer pad token — confirm.
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
    # The module's entry function is named `a__`; the original called a
    # nonexistent `main()`.
    a__()
| 336 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of ``VQModel.encode``.

    Renamed to ``VQEncoderOutput`` with a ``latents`` field because the encode
    path below constructs ``VQEncoderOutput(latents=...)``; the base class is
    the ``BaseOutput`` imported at the top of the file.

    Attributes:
        latents: encoder output after the 1x1 quant convolution
            (pre-quantization).
    """

    latents: torch.FloatTensor
class a__ ( ModelMixin , ConfigMixin ):
    """VQ-VAE model: encoder, vector quantizer and decoder with 1x1 Conv2d
    stems on each side.  Parameter and method names reconstructed from the
    upstream diffusers ``VQModel``; the original reused one placeholder name
    for every ``__init__`` argument (a SyntaxError), called the non-existent
    ``nn.Convad`` and gave all three methods the same name.
    """

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types=("DownEncoderBlock2D",),
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        # 1x1 convolutions into and out of the quantizer's embedding space.
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x, return_dict: bool = True):
        """Encode *x* to its (un-quantized) latent representation."""
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h, force_not_quantize: bool = False, return_dict: bool = True):
        """Quantize (unless disabled) and decode latents back to sample space."""
        # also go through quantization layer
        if not force_not_quantize:
            quant, _commit_loss, _info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        # The "spatial" norm variant conditions the decoder on the quantized map.
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample, return_dict: bool = True):
        """Full autoencode pass: encode, quantize, decode."""
        h = self.encode(sample).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
| 174 | 0 |
def _lowerCAmelCase(upper_limit: int) -> "list[int]":
    """Return the Catalan numbers C(0)..C(upper_limit) via dynamic programming.

    The original assigned the base cases to throwaway names instead of list
    slots (leaving every entry 0) and iterated the inner loop over the wrong
    range; both are restored to the standard recurrence.

    Raises:
        ValueError: if upper_limit is negative.
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            # The original stored the input under one name and tested another.
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                # Call the solver defined above; `catalan_numbers` never existed here.
                print(_lowerCAmelCase(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

# NOTE(review): module-level doctest run (executes on import) kept as in the
# original; consider moving it under the __main__ guard.
import doctest

doctest.testmod()
| 371 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
# Module-level flag, unused in this excerpt; presumably gates slow/optional
# test paths — TODO confirm against the rest of the file.
A__ = False
class a ( unittest.TestCase ):
    # Deliberately empty placeholder TestCase.
    # NOTE(review): the @nightly class below is also named `a` and shadows this
    # one at module scope — likely an auto-rename artifact; confirm intent.
    pass
@nightly
@require_torch_gpu
class a ( unittest.TestCase ):
    """Nightly GPU integration tests for the mega VersatileDiffusion pipeline.

    Hook/test method names and local variables reconstructed from the upstream
    diffusers test; the original referenced undefined name-mangled locals
    (`__lowercase`) and the non-existent ``torch.floataa`` dtype.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        """Saving and reloading the pipeline must not change its forward pass."""
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        """Pin expected slices for dual-guided, text-to-image and image-variation modes."""
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 44 | 0 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
    """Slow integration tests for the Flax Stable Diffusion 2 pipeline.

    Local names, hook names and the ``jnp.bfloat16`` dtype reconstructed from
    the upstream diffusers test; the original referenced the undefined
    placeholder ``lowercase_`` throughout.
    """

    def tearDown(self):
        # clean up any allocated device buffers between tests
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        """Default scheduler: pin an expected image slice at 25 steps."""
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        """DPM-Solver++ scheduler: pin an expected image slice at 25 steps."""
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        # NOTE(review): the original bound the scheduler params to a throwaway
        # name; upstream installs them into the pipeline params — confirm.
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 311 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    """Builds a tiny Flaubert config plus dummy inputs and provides per-task
    "create and check" helpers.

    Renamed to ``FlaubertModelTester`` (with the method names below) because
    the visible test class constructs ``FlaubertModelTester(self)`` and calls
    ``prepare_config_and_inputs`` / ``create_and_check_flaubert_*``.  The
    original reused one placeholder name for every parameter (a SyntaxError);
    names and defaults are reconstructed from the attribute assignments.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create dummy input tensors (and optional labels) plus a config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None  # pre-declare so the returned tuple is always defined
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        """A tiny FlaubertConfig mirroring the tester hyper-parameters."""
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Check the bare encoder's last hidden state shape."""
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Check the LM head's loss and logits shapes."""
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Check the simple (start/end logits) QA head output shapes."""
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Check the beam-search QA head (XLNet-style) output shapes."""
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Check the sequence-classification head output shapes."""
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Check the token-classification head output shapes."""
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Check the multiple-choice head output shapes."""
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()

        # Repeat each example once per answer choice.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs to the (config, inputs_dict) contract."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """FlauBERT model test suite (common model tests + pipeline tests).

    NOTE(review): every test method below is named ``_UpperCAmelCase``, so
    each definition shadows the previous one and only the last survives on
    the class; two signatures also repeat a parameter name, which is a
    SyntaxError as written.  Several bodies read names (``lowercase_``,
    ``model_class``, ``inputs_dict``) that are never bound — the distinct
    upstream ``test_*`` names and variable names need restoring.
    """

    # Model classes exercised by the common tests (presumably
    # ``all_model_classes`` upstream — TODO confirm).
    lowerCamelCase__ : str = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class mapping (presumably
    # ``pipeline_model_mapping`` upstream — TODO confirm).
    lowerCamelCase__ : Dict = (
        {
            'feature-extraction': FlaubertModel,
            'fill-mask': FlaubertWithLMHeadModel,
            'question-answering': FlaubertForQuestionAnsweringSimple,
            'text-classification': FlaubertForSequenceClassification,
            'token-classification': FlaubertForTokenClassification,
            'zero-shot': FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_ ) -> str:
        """Skip QAPipelineTests when a slow tokenizer would be used."""
        # NOTE(review): the five parameters share one name — SyntaxError.
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('''Fast''' )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_=False ) -> str:
        """Add dummy QA start/end position labels when return_labels is set."""
        a__ =super()._prepare_for_class(lowercase_, lowercase_, return_labels=lowercase_ )
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                a__ =torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=lowercase_ )
                a__ =torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=lowercase_ )
        return inputs_dict

    def _UpperCAmelCase ( self ) -> Optional[int]:
        """setUp: build the model tester and the config tester."""
        a__ =FlaubertModelTester(self )
        a__ =ConfigTester(self, config_class=lowercase_, emb_dim=37 )

    def _UpperCAmelCase ( self ) -> Optional[Any]:
        """Run the common config sanity checks."""
        self.config_tester.run_common_tests()

    def _UpperCAmelCase ( self ) -> Any:
        """Base model forward check."""
        a__ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*lowercase_ )

    def _UpperCAmelCase ( self ) -> str:
        """LM-head check."""
        a__ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*lowercase_ )

    def _UpperCAmelCase ( self ) -> Dict:
        """Simple QA head check."""
        a__ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*lowercase_ )

    def _UpperCAmelCase ( self ) -> Dict:
        """(beam-search) QA head check."""
        a__ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*lowercase_ )

    def _UpperCAmelCase ( self ) -> Any:
        """Sequence-classification head check."""
        a__ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*lowercase_ )

    def _UpperCAmelCase ( self ) -> Any:
        """Token-classification head check."""
        a__ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*lowercase_ )

    def _UpperCAmelCase ( self ) -> Tuple:
        """Multiple-choice head check."""
        a__ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*lowercase_ )

    @slow
    def _UpperCAmelCase ( self ) -> Tuple:
        """Smoke-test loading the first published checkpoint."""
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a__ =FlaubertModel.from_pretrained(lowercase_ )
            self.assertIsNotNone(lowercase_ )

    @slow
    @require_torch_gpu
    def _UpperCAmelCase ( self ) -> int:
        """Trace each model with torch.jit on CPU, save/reload, and run it
        on the GPU device (device-change round trip)."""
        a__, a__ =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return
            a__ =True
            a__ =model_class(config=lowercase_ )
            a__ =self._prepare_for_class(lowercase_, lowercase_ )
            a__ =torch.jit.trace(
                lowercase_, (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
            with tempfile.TemporaryDirectory() as tmp:
                # NOTE(review): ``os`` is not imported in the visible header —
                # verify the file imports it.
                torch.jit.save(lowercase_, os.path.join(lowercase_, '''traced_model.pt''' ) )
                a__ =torch.jit.load(os.path.join(lowercase_, '''traced_model.pt''' ), map_location=lowercase_ )
            loaded(inputs_dict['''input_ids'''].to(lowercase_ ), inputs_dict['''attention_mask'''].to(lowercase_ ) )
@require_torch
class __magic_name__ ( unittest.TestCase ):
    """Slow integration test: run flaubert_base_cased on one fixed sentence
    and compare a 3x3 corner of the last hidden state to reference values.

    NOTE(review): locals are mangled to ``a__`` while later lines read
    ``model``/``output`` — undefined as written; names need restoring.
    """

    @slow
    def _UpperCAmelCase ( self ) -> List[str]:
        """Check output shape (1, 11, 768) and a reference slice (atol 1e-4)."""
        a__ =FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
        a__ =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        with torch.no_grad():
            a__ =model(lowercase_ )[0]
        a__ =torch.Size((1, 11, 768) )
        self.assertEqual(output.shape, lowercase_ )
        # Expected 3x3 corner of the hidden states, recorded from a
        # known-good run of this checkpoint.
        a__ =torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], lowercase_, atol=1E-4 ) )
| 188 | 0 |
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
# Configure root logging once at import time.
logging.basicConfig(
    format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
# The script body logs through ``logger``; the original bound the logger
# only to the obfuscated name ``a``, leaving ``logger`` undefined.
logger = logging.getLogger(__name__)
a = logger  # keep the old name bound as well, in case anything reads it
def SCREAMING_SNAKE_CASE__():
    """Tokenize a raw text dump line by line and pickle the token-id lists.

    Reads ``--file_path``, wraps every line with the tokenizer's BOS/SEP
    markers ([CLS]/[SEP] for BERT, <s>/</s> for RoBERTa, <|endoftext|> for
    GPT-2), encodes it, and dumps the resulting list of integer arrays to
    ``{dump_file}.{tokenizer_name}.pickle``.

    The original bound every result to a throwaway ``__lowerCamelCase``
    local while subsequent lines read the real names (``parser``, ``args``,
    ``tokenizer`` ...); those names are restored from the usage sites.
    """
    # Bound locally so this function works even though the module-level
    # logger was assigned to an obfuscated name.
    logger = logging.getLogger(__name__)

    parser = argparse.ArgumentParser(
        description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' )
    parser.add_argument('--file_path', type=str, default='data/dump.txt', help='The path to the data.')
    parser.add_argument('--tokenizer_type', type=str, default='bert', choices=['bert', 'roberta', 'gpt2'])
    parser.add_argument('--tokenizer_name', type=str, default='bert-base-uncased', help='The tokenizer to use.')
    parser.add_argument('--dump_file', type=str, default='data/dump', help='The dump file prefix.')
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map['cls_token']  # `[CLS]`
        sep = tokenizer.special_tokens_map['sep_token']  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map['cls_token']  # `<s>`
        sep = tokenizer.special_tokens_map['sep_token']  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map['bos_token']  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map['eos_token']  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, 'r', encoding='utf8') as fp:
        data = fp.readlines()

    logger.info('Start encoding')
    logger.info(f"{len(data)} examples to process.")
    rslt = []
    count = 0
    interval = 1_0_0_0_0
    start = time.time()
    for text in data:
        # BOS/SEP are added manually, so disable the tokenizer's own
        # special-token insertion.
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)
        count += 1
        if count % interval == 0:
            end = time.time()
            logger.info(f"{count} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info('Finished binarization')
    logger.info(f"{len(rslt)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # Store ids as uint16 when they fit (vocab < 2**16), else int32, to keep
    # the dump small.  (``np.uintaa``/``np.intaa`` were mangled spellings.)
    if vocab_size < (1 << 1_6):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, 'wb') as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
    # The original called an undefined ``main``; the (obfuscated) entry
    # point defined above is what this script is meant to run.
    SCREAMING_SNAKE_CASE__()
| 371 |
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

# Stable Diffusion inference on CPU with IPEX bfloat16 optimizations.
# Every top-level assignment in the original was mangled to the single name
# ``a`` while the later code read the real names (``parser``, ``args``,
# ``pipe`` ...), so the script crashed with NameError immediately; the real
# names are restored from the usage sites.  ``torch.bfloataa`` was a mangled
# spelling of ``torch.bfloat16``.
parser = argparse.ArgumentParser("""Stable Diffusion script with intel optimization""", add_help=False)
parser.add_argument("""--dpm""", action="""store_true""", help="""Enable DPMSolver or not""")
parser.add_argument("""--steps""", default=None, type=int, help="""Num inference steps""")
args = parser.parse_args()

device = """cpu"""
prompt = """a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"""
model_id = """path-to-your-trained-model"""

pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex: trace the UNet with a representative input so IPEX can
# fuse ops; fall back to optimizing without a sample input if tracing fails.
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"""generator""": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("""generated.png""")
| 113 | 0 |
def _lowercase ( lowercase__ , lowercase__ ):
__lowerCAmelCase : str = [0 for i in range(r + 1 )]
# nc0 = 1
__lowerCAmelCase : int = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
__lowerCAmelCase : List[str] = min(lowercase__ , lowercase__ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
| 275 |
def _lowercase ( lowercase__ ):
if not all(x.isalpha() for x in string ):
raise ValueError('''String must only contain alphabetic characters.''' )
__lowerCAmelCase : int = sorted(string.lower() )
return len(lowercase__ ) == len(set(lowercase__ ) )
if __name__ == "__main__":
_UpperCamelCase = input("Enter a string ").strip()
_UpperCamelCase = is_isogram(input_str)
print(F"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 275 | 1 |
import warnings

from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor


_SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)


class A__ ( snake_case__ ):
    """Deprecated alias kept for backwards compatibility: behaves exactly
    like DeformableDetrImageProcessor but emits a FutureWarning when
    constructed."""

    def __init__(self, *args, **kwargs):
        """Warn about the deprecation, then defer to the parent processor."""
        # The original passed the (mangled) *args tuple as the warning
        # category; the category for a planned removal is FutureWarning.
        warnings.warn(
            '''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use DeformableDetrImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 213 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class A__ ( snake_case__ , unittest.TestCase ):
    """Fast CPU tests for the ONNX Stable Diffusion img2img pipeline against
    a tiny test checkpoint: one smoke test per scheduler, each comparing a
    3x3 slice of the generated image with hard-coded references (tol 1e-1).

    NOTE(review): every method is named ``a_`` (each definition shadows the
    previous one) and locals are mangled to ``snake_case`` while later lines
    read the real names (``image``, ``generator``, ``inputs``, ``pipe``,
    ``image_slice``, ``expected_slice``) — undefined as written.
    """

    __magic_name__ = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'

    def a_ ( self , __snake_case=0 ):
        # Deterministic dummy inputs; ``__snake_case`` seeds both RNGs.
        snake_case = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(__snake_case ) )
        snake_case = np.random.RandomState(__snake_case )
        snake_case = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''strength''': 0.75,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs

    def a_ ( self ):
        # Default (PNDM) scheduler smoke test.
        snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.set_progress_bar_config(disable=__snake_case )
        snake_case = self.get_dummy_inputs()
        snake_case = pipe(**__snake_case ).images
        snake_case = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        snake_case = np.array([0.6_9643, 0.5_8484, 0.5_0314, 0.5_8760, 0.5_5368, 0.5_9643, 0.5_1529, 0.4_1217, 0.4_9087] )
        assert np.abs(image_slice - expected_slice ).max() < 1E-1

    def a_ ( self ):
        # PNDM scheduler (skip_prk_steps) smoke test.
        snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        snake_case = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__snake_case )
        pipe.set_progress_bar_config(disable=__snake_case )
        snake_case = self.get_dummy_inputs()
        snake_case = pipe(**__snake_case ).images
        snake_case = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        snake_case = np.array([0.6_1737, 0.5_4642, 0.5_3183, 0.5_4465, 0.5_2742, 0.6_0525, 0.4_9969, 0.4_0655, 0.4_8154] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    def a_ ( self ):
        # LMS discrete scheduler smoke test (with a warmup pass).
        snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        snake_case = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=__snake_case )
        # warmup pass to apply optimizations
        snake_case = pipe(**self.get_dummy_inputs() )
        snake_case = self.get_dummy_inputs()
        snake_case = pipe(**__snake_case ).images
        snake_case = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        snake_case = np.array([0.5_2761, 0.5_9977, 0.4_9033, 0.4_9619, 0.5_4282, 0.5_0311, 0.4_7600, 0.4_0918, 0.4_5203] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    def a_ ( self ):
        # Euler discrete scheduler smoke test.
        snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        snake_case = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=__snake_case )
        snake_case = self.get_dummy_inputs()
        snake_case = pipe(**__snake_case ).images
        snake_case = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        snake_case = np.array([0.5_2911, 0.6_0004, 0.4_9229, 0.4_9805, 0.5_4502, 0.5_0680, 0.4_7777, 0.4_1028, 0.4_5304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    def a_ ( self ):
        # Euler-ancestral scheduler smoke test.
        snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        snake_case = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=__snake_case )
        snake_case = self.get_dummy_inputs()
        snake_case = pipe(**__snake_case ).images
        snake_case = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        snake_case = np.array([0.5_2911, 0.6_0004, 0.4_9229, 0.4_9805, 0.5_4502, 0.5_0680, 0.4_7777, 0.4_1028, 0.4_5304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    def a_ ( self ):
        # DPM-Solver multistep scheduler smoke test.
        snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=__snake_case )
        snake_case = self.get_dummy_inputs()
        snake_case = pipe(**__snake_case ).images
        snake_case = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        snake_case = np.array([0.6_5331, 0.5_8277, 0.4_8204, 0.5_6059, 0.5_3665, 0.5_6235, 0.5_0969, 0.4_0009, 0.4_6552] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class A__ ( unittest.TestCase ):
    """Nightly GPU integration tests for the ONNX img2img pipeline against
    the full SD v1.4 / v1.5 ONNX checkpoints (sketch -> fantasy landscape).

    NOTE(review): same mangling as above — both properties share the name
    ``a_`` and locals are assigned to ``snake_case`` while later lines read
    ``options``, ``init_image``, ``pipe``, ``output`` etc.
    """

    @property
    def a_ ( self ):
        # ONNX Runtime CUDA provider capped to a 15GB memory arena.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def a_ ( self ):
        # Session options (shadows the property above — same name).
        snake_case = ort.SessionOptions()
        snake_case = False
        return options

    def a_ ( self ):
        """SD v1.4, default scheduler, 10 steps; compare a reference slice."""
        snake_case = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        snake_case = init_image.resize((7_6_8, 5_1_2) )
        # using the PNDM scheduler by default
        snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=__snake_case , feature_extractor=__snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=__snake_case )
        snake_case = '''A fantasy landscape, trending on artstation'''
        snake_case = np.random.RandomState(0 )
        snake_case = pipe(
            prompt=__snake_case , image=__snake_case , strength=0.75 , guidance_scale=7.5 , num_inference_steps=1_0 , generator=__snake_case , output_type='''np''' , )
        snake_case = output.images
        snake_case = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
        assert images.shape == (1, 5_1_2, 7_6_8, 3)
        snake_case = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2

    def a_ ( self ):
        """SD v1.5 with an LMS scheduler, 20 steps; compare a reference slice."""
        snake_case = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        snake_case = init_image.resize((7_6_8, 5_1_2) )
        snake_case = LMSDiscreteScheduler.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
        snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=__snake_case , safety_checker=__snake_case , feature_extractor=__snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=__snake_case )
        snake_case = '''A fantasy landscape, trending on artstation'''
        snake_case = np.random.RandomState(0 )
        snake_case = pipe(
            prompt=__snake_case , image=__snake_case , strength=0.75 , guidance_scale=7.5 , num_inference_steps=2_0 , generator=__snake_case , output_type='''np''' , )
        snake_case = output.images
        snake_case = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
        assert images.shape == (1, 5_1_2, 7_6_8, 3)
        snake_case = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 213 | 1 |
from math import isqrt, log2

# ``loga`` was a mangled spelling of ``log2`` — there is no ``math.loga``,
# so the original import raised ImportError.  Keep the old name bound as an
# alias so existing call sites below keep working.
loga = log2
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
A_ : List[Any] = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A_ : str = False
return [i for i in range(2 , SCREAMING_SNAKE_CASE ) if is_prime[i]]
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE = 800_800 , SCREAMING_SNAKE_CASE = 800_800 ):
A_ : Optional[Any] = degree * loga(SCREAMING_SNAKE_CASE )
A_ : int = int(SCREAMING_SNAKE_CASE )
A_ : List[Any] = calculate_prime_numbers(SCREAMING_SNAKE_CASE )
A_ : Any = 0
A_ : Tuple = 0
A_ : Any = len(SCREAMING_SNAKE_CASE ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
    # ``solution`` was never defined; the solver above (obfuscated name) is
    # the intended entry point.
    print(f"{_SCREAMING_SNAKE_CASE() = }")
| 186 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def _SCREAMING_SNAKE_CASE(features):
    """Infer a default parquet writer batch size from the dataset features.

    Image/Audio/binary columns produce very large rows, so cap the parquet
    row-group size for them; returns None when no cap applies (the np.inf
    sentinel was never lowered).

    The original declared ``nonlocal batch_size`` with no enclosing binding
    (a SyntaxError) and tested ``isinstance(x, x)``; the names are restored
    from the visible config constants.
    """
    batch_size = np.inf

    def set_batch_size(feature) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    # Walk every (possibly nested) feature and apply the cap.
    _visit(features, set_batch_size)
    return None if batch_size is np.inf else batch_size
class _lowerCamelCase(UpperCamelCase):
    """Dataset reader that materializes one or more parquet files.

    NOTE(review): the original ``__init__`` repeated one parameter name (a
    SyntaxError) and bound everything to throwaway locals, so
    ``self.builder`` was never created; the signature and attribute
    assignments are restored to match the ``super().__init__`` keywords and
    the ``_snake_case`` body, which reads ``self.builder``,
    ``self.streaming`` and ``self.keep_in_memory``.
    """

    def __init__(
        self,
        path_or_paths,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        num_proc=None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Normalize to a {split: paths} mapping.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def _snake_case(self):
        """Download/prepare (or stream) the data and return the dataset."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class _lowerCamelCase:
    """Write a Dataset to a parquet file path or binary file object.

    NOTE(review): the original bound the constructor arguments to throwaway
    locals (attributes never stored) and defined both the public writer and
    the private streaming helper under one name ``_snake_case`` (the second
    shadowed the first, and the call sites use ``self._write``); both are
    repaired here.
    """

    def __init__(self, dataset, path_or_buf, batch_size=None, **parquet_writer_kwargs):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        # ``_SCREAMING_SNAKE_CASE`` is this file's (obfuscated) name for the
        # feature-aware default batch-size helper defined above.
        self.batch_size = batch_size or _SCREAMING_SNAKE_CASE(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def _snake_case(self):
        """Write the dataset; return the number of bytes written."""
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, '''wb+''') as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj, batch_size, **parquet_writer_kwargs):
        """Stream the dataset into ``file_obj`` in row groups of ``batch_size``."""
        written = 0
        # Defensive: a stray path_or_buf kwarg must not reach ParquetWriter.
        parquet_writer_kwargs.pop('''path_or_buf''', None)
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit='''ba''',
            disable=not logging.is_progress_bar_enabled(),
            desc='''Creating parquet from Arrow format''',
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
| 186 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
A_ = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 354 |
"""simple docstring"""
import cva
import numpy as np
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , snake_case : float , snake_case : int ):
'''simple docstring'''
if k in (0.04, 0.06):
A__ : Optional[int] = k
A__ : int = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : List[Any] ):
'''simple docstring'''
return str(self.k )
def _UpperCamelCase ( self : int , snake_case : str ):
'''simple docstring'''
A__ : List[str] = cva.imread(snake_case , 0 )
A__ , A__ : Union[str, Any] = img.shape
A__ : list[list[int]] = []
A__ : Optional[Any] = img.copy()
A__ : List[str] = cva.cvtColor(snake_case , cva.COLOR_GRAY2RGB )
A__ , A__ : List[Any] = np.gradient(snake_case )
A__ : List[Any] = dx**2
A__ : Any = dy**2
A__ : Dict = dx * dy
A__ : Any = 0.04
A__ : Optional[Any] = self.window_size // 2
for y in range(snake_case , h - offset ):
for x in range(snake_case , w - offset ):
A__ : List[str] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
A__ : Tuple = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
A__ : Optional[int] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
A__ : int = (wxx * wyy) - (wxy**2)
A__ : Any = wxx + wyy
A__ : List[str] = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
    # ``HarrisCorner`` / ``edge_detect`` were undefined names in the
    # original; wire the demo to the class actually defined above (whose
    # detect method carries the obfuscated name ``_UpperCamelCase``).
    edge_detect = __SCREAMING_SNAKE_CASE(0.04, 3)
    color_img, _ = edge_detect._UpperCamelCase('''path_to_image''')
    cva.imwrite('''detect.png''', color_img)
| 296 | 0 |
"""simple docstring"""
__magic_name__ = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__magic_name__ = [{"type": "code", "content": INSTALL_CONTENT}]
__magic_name__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 100 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
class lowerCAmelCase_(lowerCAmelCase):
    """Configuration for BertGeneration models (``model_type``
    "bert-generation").

    NOTE(review): the original ``__init__`` named every parameter
    ``lowerCAmelCase`` (a SyntaxError) and bound each argument to a
    throwaway local, so instances carried no configuration; parameter names
    are restored from the assignment targets' usage and attributes are
    stored on ``self``.
    """

    _lowerCAmelCase : Tuple = """bert-generation"""

    def __init__(
        self,
        vocab_size=5_03_58,
        hidden_size=10_24,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=40_96,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 150 | 0 |
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def SCREAMING_SNAKE_CASE():
    """Build a tiny Linear(2, 4) model with an AdamW optimizer, a one-cycle
    LR scheduler, and 3-element train/valid dataloaders for the Accelerator
    tests.

    The original assigned everything to a mangled local and then read
    ``model``/``optimizer`` (NameError as written); names are reconnected.
    """
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl
def SCREAMING_SNAKE_CASE(_lowerCamelCase):
    """Scalar fingerprint of a linear module's parameters (sum of |weight|
    plus |bias|), used to tell whether weights changed across save/load.

    The original read an undefined ``model`` instead of its own parameter.
    """
    return (_lowerCamelCase.weight.abs().sum() + _lowerCamelCase.bias.abs().sum()).item()
def SCREAMING_SNAKE_CASE(_lowerCamelCase):
    """Overwrite the linear module's parameters in place with a freshly
    initialized layer of the same shape (``weight.T.shape`` is
    ``(in_features, out_features)``, i.e. Linear's constructor order).

    The original read an undefined ``model`` instead of its own parameter.
    """
    new_state = torch.nn.Linear(*tuple(_lowerCamelCase.weight.T.shape)).state_dict()
    _lowerCamelCase.load_state_dict(new_state)
class __A ( SCREAMING_SNAKE_CASE_ ):
    @require_cuda
    def __A ( self ):
        """On a CUDA machine the default Accelerator state is the GPU, and a
        second Accelerator(cpu=True) must be rejected once state is shared."""
        _lowerCAmelCase : Union[str, Any] = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        # NOTE(review): ``a__`` is an undefined (mangled) name here —
        # presumably ValueError / True upstream.
        with self.assertRaises(a__ ):
            _lowerCAmelCase : Tuple = Accelerator(cpu=a__ )
    def __A ( self ):
        """GradientState is a singleton: num_steps / sync_gradients mutate
        shared state; reset afterwards so later tests see a fresh state.

        NOTE(review): assignments go to the mangled ``_lowerCAmelCase`` while
        the asserts read ``state`` — names need restoring upstream.
        """
        _lowerCAmelCase : Dict = Accelerator()
        _lowerCAmelCase : Any = GradientState()
        assert state.num_steps == 1
        _lowerCAmelCase : Optional[int] = 4
        assert state.num_steps == 4
        assert state.sync_gradients is True
        _lowerCAmelCase : Dict = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    def __A ( self ):
        """accelerator.prepare registers each prepared object in the matching
        internal registry (_models/_optimizers/_schedulers/_dataloaders).

        NOTE(review): the parenthesized single-name "unpacking" below cannot
        bind the five ``prepared_*`` names the asserts read — mangled.
        """
        _lowerCAmelCase : Optional[int] = Accelerator()
        _lowerCAmelCase : Any = create_components()
        (
            _lowerCAmelCase
        ) : int = accelerator.prepare(a__ , a__ , a__ , a__ , a__ )
        self.assertTrue(prepared_model in accelerator._models )
        self.assertTrue(prepared_optimizer in accelerator._optimizers )
        self.assertTrue(prepared_scheduler in accelerator._schedulers )
        self.assertTrue(prepared_train_dl in accelerator._dataloaders )
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
    def __A ( self ):
        """free_memory clears every internal registry."""
        # NOTE(review): ``accelerator`` / ``a__`` are unbound (mangled) names.
        _lowerCAmelCase : Optional[Any] = Accelerator()
        _lowerCAmelCase : List[Any] = create_components()
        accelerator.prepare(a__ , a__ , a__ , a__ , a__ )
        accelerator.free_memory()
        self.assertTrue(len(accelerator._models ) == 0 )
        self.assertTrue(len(accelerator._optimizers ) == 0 )
        self.assertTrue(len(accelerator._schedulers ) == 0 )
        self.assertTrue(len(accelerator._dataloaders ) == 0 )
    def __A ( self ):
        """ACCELERATE_TORCH_DEVICE must be honored; torch.cuda.set_device is
        mocked because the fake device (cuda:64) does not exist."""
        PartialState._reset_state()
        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*a__ , **a__ ):
            pass
        # NOTE(review): the patch target should presumably be the local
        # ``noop`` rather than the unbound ``a__`` — mangled.
        with patch("""torch.cuda.set_device""" , a__ ), patch_environment(ACCELERATE_TORCH_DEVICE="""cuda:64""" ):
            _lowerCAmelCase : Dict = Accelerator()
            self.assertEqual(str(accelerator.state.device ) , """cuda:64""" )
    def __A ( self ):
        """save_state / load_state round-trip restores model weights, checked
        via the abs-sum parameter signature helper.

        NOTE(review): ``get_signature`` / ``load_random_weights`` are not
        defined under those names in this file (mangled helpers above).
        """
        _lowerCAmelCase : Any = Accelerator()
        _lowerCAmelCase : List[str] = create_components()
        accelerator.prepare(a__ , a__ , a__ , a__ , a__ )
        _lowerCAmelCase : List[Any] = get_signature(a__ )
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(a__ )
            # make sure random weights don't match
            load_random_weights(a__ )
            self.assertTrue(abs(model_signature - get_signature(a__ ) ) > 1e-3 )
            # make sure loaded weights match
            accelerator.load_state(a__ )
            self.assertTrue(abs(model_signature - get_signature(a__ ) ) < 1e-3 )
    def __A ( self ):
        """register_save_state_pre_hook / register_load_state_pre_hook fire
        around save_state/load_state and stop firing once their handles are
        removed.

        NOTE(review): the two hook defs repeat the parameter name ``a__``
        (a SyntaxError as written), and several reads (``models``,
        ``config``, ``model``, ``save_hook``, ``load_hook``) are unbound —
        all mangled and to be restored upstream.
        """
        _lowerCAmelCase : str = Accelerator()
        _lowerCAmelCase : str = create_components()
        accelerator.prepare(a__ , a__ , a__ , a__ , a__ )
        _lowerCAmelCase : Optional[Any] = get_signature(a__ )
        # saving hook
        def save_config(a__ , a__ , a__ ):
            # Persist the first model's class name next to the checkpoint.
            _lowerCAmelCase : Dict = {"""class_name""": models[0].__class__.__name__}
            with open(os.path.join(a__ , """data.json""" ) , """w""" ) as f:
                json.dump(a__ , a__ )
        # loading hook
        def load_config(a__ , a__ ):
            # Read the class name back and attach it to the model.
            with open(os.path.join(a__ , """data.json""" ) , """r""" ) as f:
                _lowerCAmelCase : int = json.load(a__ )
            _lowerCAmelCase : str = config["""class_name"""]
        _lowerCAmelCase : Union[str, Any] = accelerator.register_save_state_pre_hook(a__ )
        _lowerCAmelCase : int = accelerator.register_load_state_pre_hook(a__ )
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(a__ )
            # make sure random weights don't match with hooks
            load_random_weights(a__ )
            self.assertTrue(abs(model_signature - get_signature(a__ ) ) > 1e-3 )
            # random class name to verify correct one is loaded
            _lowerCAmelCase : Dict = """random"""
            # make sure loaded weights match with hooks
            accelerator.load_state(a__ )
            self.assertTrue(abs(model_signature - get_signature(a__ ) ) < 1e-3 )
            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__ )
        # remove hooks
        save_hook.remove()
        load_hook.remove()
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(a__ )
            # make sure random weights don't match with hooks removed
            load_random_weights(a__ )
            self.assertTrue(abs(model_signature - get_signature(a__ ) ) > 1e-3 )
            # random class name to verify correct one is loaded
            _lowerCAmelCase : Optional[Any] = """random"""
            # make sure loaded weights match with hooks removed
            accelerator.load_state(a__ )
            self.assertTrue(abs(model_signature - get_signature(a__ ) ) < 1e-3 )
            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__ )
def __A ( self ):
    """prepare() must pass a `None` extra object through unchanged."""
    accelerator = Accelerator()
    model, optimizer, scheduler, train_dl, valid_dl = create_components()
    dummy_obj = None

    # This should work
    model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj)
    self.assertTrue(dummy_obj is None)
def __A ( self ):
    """prepare() must tag every prepared object with `_is_accelerate_prepared`
    while leaving unknown extra objects (a plain list here) untagged.
    """
    accelerator = Accelerator()
    model, optimizer, scheduler, train_dl, valid_dl = create_components()
    dummy_obj = [1, 2, 3]

    # This should work
    model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj)

    self.assertEqual(
        getattr(dummy_obj, "_is_accelerate_prepared", False), False,
        "Dummy object should have `_is_accelerate_prepared` set to `True`", )
    self.assertEqual(
        getattr(model, "_is_accelerate_prepared", False), True,
        "Model is missing `_is_accelerator_prepared` or is set to `False`", )
    self.assertEqual(
        getattr(optimizer, "_is_accelerate_prepared", False), True,
        "Optimizer is missing `_is_accelerator_prepared` or is set to `False`", )
    self.assertEqual(
        getattr(scheduler, "_is_accelerate_prepared", False), True,
        "Scheduler is missing `_is_accelerator_prepared` or is set to `False`", )
    self.assertEqual(
        getattr(train_dl, "_is_accelerate_prepared", False), True,
        "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`", )
    self.assertEqual(
        getattr(valid_dl, "_is_accelerate_prepared", False), True,
        "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`", )
@slow
@require_bnb
def __A ( self ):
    """A bitsandbytes 8-bit model on a single device must be preparable.

    Fixes the digit-mangled kwarg `load_in_abit` -> `load_in_8bit`.
    """
    from transformers import AutoModelForCausalLM

    model = AutoModelForCausalLM.from_pretrained(
        "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map={"": 0}, )

    accelerator = Accelerator()

    # This should work
    model = accelerator.prepare(model)
@slow
@require_bnb
def __A ( self ):
    """Preparing an 8-bit model that offloads part of itself to CPU must fail.

    Fixes mangled kwargs: `load_in_abit` -> `load_in_8bit`,
    `llm_inta_enable_fpaa_cpu_offload` -> `llm_int8_enable_fp32_cpu_offload`.
    """
    from transformers import AutoModelForCausalLM

    accelerator = Accelerator()

    with init_empty_weights():
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", )
    model.tie_weights()
    device_map = infer_auto_device_map(model)
    # Force part of the model off the GPU so prepare() must reject it.
    device_map["lm_head"] = "cpu"
    model = AutoModelForCausalLM.from_pretrained(
        "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True)

    # This should not work and get value error
    with self.assertRaises(ValueError):
        model = accelerator.prepare(model)
@slow
@require_bnb
@require_multi_gpu
def __A ( self ):
    """An 8-bit model split across several GPUs must be rejected by prepare()
    when distributed training is active.
    """
    from transformers import AutoModelForCausalLM

    # Pretend we are in a MULTI_GPU distributed setup.
    # NOTE(review): the obfuscated original only built this dict; upstream
    # assigns it to PartialState._shared_state — confirm against accelerate.
    PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}
    with init_empty_weights():
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", )
    model.tie_weights()
    device_map = infer_auto_device_map(model)
    # Spread the model over two devices to trigger the error path.
    device_map["lm_head"] = 1
    model = AutoModelForCausalLM.from_pretrained(
        "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map=device_map, )
    accelerator = Accelerator()

    # This should not work and get value error
    with self.assertRaises(ValueError):
        _ = accelerator.prepare(model)
    PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def __A ( self ):
    """Without distributed state active, a multi-device 8-bit model prepares fine."""
    from transformers import AutoModelForCausalLM

    with init_empty_weights():
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", )
    device_map = infer_auto_device_map(model)
    device_map["lm_head"] = 1
    model = AutoModelForCausalLM.from_pretrained(
        "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map=device_map, )
    accelerator = Accelerator()

    # This should work
    model = accelerator.prepare(model)
@require_cuda
def __A ( self ):
    """Accelerator(cpu=True) on a CUDA machine must still prepare objects."""
    model = torch.nn.Linear(10, 10)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    accelerator = Accelerator(cpu=True)
    _ = accelerator.prepare(optimizer)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_a : List[Any] = logging.get_logger(__name__)
class __A ( BackboneConfigMixin , PretrainedConfig ):
    """Configuration for a MaskFormer Swin backbone.

    Restored from the obfuscated original, which had every __init__ parameter
    named `a__` (duplicate-argument SyntaxError), lost the `self.` assignment
    targets, and collapsed `model_type`/`attribute_map` into one name.
    """

    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
# (non-code dataset artifact removed: "| 126 | 0 |")
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowerCamelCase = '''0.12''' # assumed parallelism: 8
@require_flax
@is_staging_test
class _a ( unittest.TestCase):
    """Round-trip tests pushing Flax models to the Hub staging endpoint.

    Restored from the obfuscated original, in which all four methods shared
    the name `UpperCAmelCase__` (so only the last survived) and locals were
    assigned under a different name than they were read.
    """

    @classmethod
    def setUpClass(cls):
        # Authenticate once for the whole class against the staging endpoint.
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup: the repos may not exist if a test failed early.
        try:
            delete_repo(token=cls._token, repo_id='''test-model-flax''')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='''valid_org/test-model-flax-org''')
        except HTTPError:
            pass

    def test_push_to_hub(self):
        """push_to_hub + from_pretrained preserves every parameter (diff <= 1e-3)."""
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        model = FlaxBertModel(config)
        model.push_to_hub('''test-model-flax''', use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(F'{USER}/test-model-flax')

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1E-3, msg=F'{key} not identical')

        # Reset repo
        delete_repo(token=self._token, repo_id='''test-model-flax''')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id='''test-model-flax''', push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(F'{USER}/test-model-flax')

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1E-3, msg=F'{key} not identical')

    def test_push_to_hub_in_organization(self):
        """Same round trip, but pushing into an organization namespace."""
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        model = FlaxBertModel(config)
        model.push_to_hub('''valid_org/test-model-flax-org''', use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''')

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1E-3, msg=F'{key} not identical')

        # Reset repo
        delete_repo(token=self._token, repo_id='''valid_org/test-model-flax-org''')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id='''valid_org/test-model-flax-org''', push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''')

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1E-3, msg=F'{key} not identical')
def check_models_equal(model_1, model_2):
    """Return True when two Flax models have element-wise equal parameters.

    The obfuscated original declared duplicate parameters (`_a , _a` — a
    SyntaxError) and compared a flattened dict against itself, so it always
    returned True. Call sites below use the name `check_models_equal`, so the
    function is restored under that name.
    """
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal


# Backward-compatible alias for the obfuscated name the file previously exported.
lowerCamelCase_ = check_models_equal
@require_flax
class _a ( unittest.TestCase):
    """Loading Flax models from a subfolder, locally and from the Hub.

    Restored from the obfuscated original, in which all four methods shared
    one name and `assertRaises` received an undefined placeholder; loading a
    repo whose weights live in a subfolder without `subfolder=` raises OSError.
    """

    def test_model_from_pretrained_subfolder(self):
        """Saving into a subfolder requires `subfolder=` when reloading."""
        config = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''')
        model = FlaxBertModel(config)

        subfolder = '''bert'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        """Same as above but with sharded checkpoints (max_shard_size='10KB')."""
        config = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''')
        model = FlaxBertModel(config)

        subfolder = '''bert'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size='''10KB''')

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        """Hub repo with weights in a subfolder: must pass `subfolder=`."""
        subfolder = '''bert'''
        model_id = '''hf-internal-testing/tiny-random-bert-subfolder'''
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        """Hub repo, sharded weights in a subfolder: must pass `subfolder=`."""
        subfolder = '''bert'''
        model_id = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
# (non-code dataset artifact removed: "| 131 |")
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
lowerCamelCase = logging.get_logger(__name__)
class _a ( BaseImageProcessor):
    r"""
    Image processor implementing the standard resize -> center-crop -> rescale ->
    normalize pipeline (ImageNet-standard mean/std), plus semantic-segmentation
    post-processing.

    Restored from the obfuscated original, in which every multi-argument
    signature reused one parameter name (a SyntaxError) and `preprocess` called
    `self.resize` / `self.center_crop` / `self.rescale` / `self.normalize`
    although no method carried those names.
    """

    model_input_names = ['''pixel_values''']

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'''shortest_edge''': 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        crop_size = get_size_dict(crop_size, param_name='''crop_size''')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge matches ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image, size=size['''shortest_edge'''], default_to_square=False)
        # `resize` here resolves to the module-level transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}')
        return center_crop(image, size=(size['''height'''], size['''width''']), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> Optional[Any]:
        """Run the full pipeline; every flag falls back to the instance default."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='''crop_size''')
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')

        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''')

        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''')

        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None) -> List[Any]:
        """Turn model logits into per-image segmentation maps, optionally
        resized to ``target_sizes`` via bilinear interpolation.
        """
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''')

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='''bilinear''', align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
# (non-code dataset artifact removed: "| 131 | 1 |")
def lowercase(number: int) -> int:
    """Return the ``number``-th term of Sylvester's sequence (1-indexed).

    The sequence starts at 2 and follows s(n) = (s(n-1) - 1) * s(n-1) + 1,
    i.e. 2, 3, 7, 43, 1807, ...

    The obfuscated original took a parameter named `UpperCamelCase_` but read
    `number`, recursed via an undefined `sylvester`, and asserted
    `isinstance(x, x)`; all restored here.

    Raises:
        ValueError: if ``number`` is less than 1.
    """
    assert isinstance(number, int), f"""The input value of [n={number}] is not an integer"""

    if number == 1:
        return 2
    elif number < 1:
        msg = f"""The input value of [n={number}] has to be > 0"""
        raise ValueError(msg)
    else:
        num = lowercase(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
    # The original called an undefined `sylvester`; the function in this
    # module is named `lowercase`.
    print(f"""The 8th number in Sylvester's sequence: {lowercase(8)}""")
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
# NOTE(review): all four module-level constants below share the single name
# `_SCREAMING_SNAKE_CASE`, so each assignment shadows the previous one and
# only the last list survives at import time. Upstream these are four distinct
# names (the module logger, the pretrained-config archive map, and two
# suppressed-token lists) — confirm before renaming, since unseen code in the
# rest of the file may reference the placeholder name.
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)

# Map of pretrained checkpoint name -> config URL (shadowed by the lists below).
_SCREAMING_SNAKE_CASE = {
    """openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}

# fmt: off
# Token ids of non-speech tokens to suppress during generation (multilingual list).
_SCREAMING_SNAKE_CASE = [
    1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
    2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
    6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
    7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
    1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
    4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
    1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
    1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
    3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
# Token ids of non-speech tokens to suppress (second vocabulary variant).
_SCREAMING_SNAKE_CASE = [
    1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
    2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
    6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
    8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
    3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
    7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
    1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
    2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
    4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class SCREAMING_SNAKE_CASE_ ( PretrainedConfig ):
    """Whisper model configuration.

    Restored from the obfuscated original, whose __init__ declared every
    parameter as `lowerCamelCase_` (duplicate-argument SyntaxError), dropped
    the `self.` assignment targets, inherited from an undefined name, and
    collapsed `model_type`/`keys_to_ignore_at_inference`/`attribute_map` into
    one class attribute.
    """

    model_type = """whisper"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__(
        self,
        vocab_size=5_1865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=5_0257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.0_2,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=5_0256,
        bos_token_id=5_0256,
        eos_token_id=5_0256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 5_0256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.0_5,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        """Build the config; unknown kwargs are forwarded to PretrainedConfig."""
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs, )
class SCREAMING_SNAKE_CASE_ ( OnnxSeqaSeqConfigWithPast ):
    """ONNX export configuration for Whisper.

    Restored from the obfuscated original, whose three members all shared the
    name `lowerCamelCase_` and declared duplicate parameters; the base-class
    contract requires the names `inputs`, `generate_dummy_inputs` and
    `atol_for_validation`.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported graph's inputs."""
        common_inputs = OrderedDict(
            [
                ("""input_features""", {0: """batch""", 1: """feature_size""", 2: """encoder_sequence"""}),
            ] )
        if self.use_past:
            # With a cache, the decoder consumes one token at a time.
            common_inputs["""decoder_input_ids"""] = {0: """batch"""}
        else:
            common_inputs["""decoder_input_ids"""] = {0: """batch""", 1: """decoder_sequence"""}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="""inputs""")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 2_2050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ):
        """Build dummy encoder audio features plus decoder token inputs."""
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework, sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency, )

        encoder_sequence_length = encoder_inputs["""input_features"""].shape[2]
        # With a cache the decoder sequence is tied to half the encoder length.
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework)

        dummy_inputs["""input_features"""] = encoder_inputs.pop("""input_features""")
        dummy_inputs["""decoder_input_ids"""] = decoder_inputs.pop("""decoder_input_ids""")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["""past_key_values"""] = decoder_inputs.pop("""past_key_values""")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1E-3
# (non-code dataset artifact removed: "| 165 | 1 |")
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class __snake_case ( TaskTemplate ):
    """Task template for extractive question answering.

    Restored from the obfuscated original, in which `frozen=` and the base
    class were undefined names and all six class attributes shared one
    (name-mangled) identifier, so the property below read attributes that
    never existed.
    """

    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="""question-answering-extractive""", metadata={"""include_in_asdict_even_if_is_default""": True})
    input_schema: ClassVar[Features] = Features({"""question""": Value("""string"""), """context""": Value("""string""")})
    label_schema: ClassVar[Features] = Features(
        {
            """answers""": Sequence(
                {
                    """text""": Value("""string"""),
                    """answer_start""": Value("""int32"""),
                } )
        } )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map the configured column names onto the canonical schema names."""
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 143 | import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ : int = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Return (timm_key, hf_key) pairs mapping a timm DeiT checkpoint onto HF DeiT.

    Args:
        config: DeiT configuration; only ``num_hidden_layers`` is read.
        base_model: if True, target a bare backbone (no "deit." prefix, no
            classification heads, pooler/layernorm included instead).

    NOTE(review): the original def duplicated its parameter name and never
    initialized ``rename_keys``; name restored from the call sites below
    (``create_rename_keys(config, base_model)``).
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        # (NOTE(review): [4:] leaves a leading "." — dead path in practice since
        # base_model is always False for DeiT; preserved as-is, verify upstream.)
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )
    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each timm fused qkv projection into separate HF q/k/v entries.

    Mutates ``state_dict`` in place: pops ``blocks.{i}.attn.qkv.*`` and adds
    query/key/value weight+bias under the HF naming scheme.

    NOTE(review): the original assignments had lost their left-hand-side
    targets; keys reconstructed from the HF DeiT attention layout — verify
    against the upstream conversion script.
    """
    for i in range(config.num_hidden_layers):
        # No "deit." prefix when targeting the bare backbone.
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place.

    NOTE(review): the original body popped the value but had lost the
    re-insertion target; restored from the call sites (`rename_key(state_dict, src, dest)`).
    """
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download and return the standard COCO cats test image (network I/O).

    Used as a fixed input for sanity-checking converted model outputs.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True lets PIL read directly from the response body.
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Convert a timm DeiT checkpoint to the HF format and save it.

    Loads the timm model, renames/splits its weights into the HF layout,
    verifies logits agree on a test image, then saves model + image
    processor to ``pytorch_dump_folder_path``.

    NOTE(review): locals and config-attribute targets were destroyed by an
    automated rename; reconstructed from the HF DeiT conversion script —
    verify against upstream before running.
    """
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # e.g. "vit_deit_base_distilled_patch16_224" -> patch 16, image 224
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass  # defaults already match the base architecture
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: convert one named timm DeiT checkpoint.
    # NOTE(review): the original bound the parser/args to throwaway names,
    # leaving `parser` and `args` undefined; restored here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 143 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"""configuration_mobilebert""": [
"""MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileBertConfig""",
"""MobileBertOnnxConfig""",
],
"""tokenization_mobilebert""": ["""MobileBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""MobileBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileBertForMaskedLM""",
"""MobileBertForMultipleChoice""",
"""MobileBertForNextSentencePrediction""",
"""MobileBertForPreTraining""",
"""MobileBertForQuestionAnswering""",
"""MobileBertForSequenceClassification""",
"""MobileBertForTokenClassification""",
"""MobileBertLayer""",
"""MobileBertModel""",
"""MobileBertPreTrainedModel""",
"""load_tf_weights_in_mobilebert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileBertForMaskedLM""",
"""TFMobileBertForMultipleChoice""",
"""TFMobileBertForNextSentencePrediction""",
"""TFMobileBertForPreTraining""",
"""TFMobileBertForQuestionAnswering""",
"""TFMobileBertForSequenceClassification""",
"""TFMobileBertForTokenClassification""",
"""TFMobileBertMainLayer""",
"""TFMobileBertModel""",
"""TFMobileBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 352 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    """Structural type for audio filters: anything with a ``process`` method.

    NOTE(review): the original inherited an undefined name and its method
    did not match the ``filter_type.process(...)`` call sites below;
    restored to a ``Protocol`` with a ``process`` stub.
    """

    def process(self, sample: float) -> float:
        """Calculate y[n] for input sample x[n]; the stub returns silence."""
        return 0.0
def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple:
    """Return (lowest, highest) display bounds for an FFT magnitude plot.

    The bounds are at least (-20, 20) dB and widen to include the data over
    the positive-frequency half-spectrum (DC bin excluded).

    NOTE(review): the original duplicated its parameter name (a SyntaxError);
    names restored from the body.
    """
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot a filter's magnitude response by FFT-ing its impulse response.

    NOTE(review): the original had duplicate parameter names, unbound locals,
    and called the nonexistent ``np.logaa``; restored (``np.log10``) — verify
    against the upstream plotting utility.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot a filter's (unwrapped) phase response from its impulse response.

    NOTE(review): duplicate parameter names and unbound locals restored from
    the body; verify against the upstream plotting utility.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phase = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phase, -2 * pi))
    plt.show()
| 297 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)

# Map of public checkpoint name -> hosted config URL.
lowerCamelCase : List[str] = {
    '''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
    # See all CANINE models at https://huggingface.co/models?filter=canine
}
class lowerCAmelCase ( PretrainedConfig ):
    """Configuration for CANINE (character-level transformer) models.

    Defaults mirror the ``google/canine-s`` checkpoint.

    NOTE(review): the original inherited an undefined base name and every
    ``__init__`` parameter was named ``__a`` (a SyntaxError); parameter names
    restored from the attribute assignments in the body.
    """

    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,  # private-use codepoint used as BOS
        eos_token_id=0xE001,  # private-use codepoint used as EOS
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Fill in default masks for an M2M100 forward call and pack everything
    into the kwargs dict the model expects.

    Masks default to "attend everywhere except padding"; head masks default
    to all-ones. NOTE(review): the original duplicated all parameter names
    (a SyntaxError); names restored from the body/call sites. The computed
    decoder_attention_mask was previously discarded in favour of the encoder
    mask — fixed to return the decoder-side mask.
    """
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class lowerCAmelCase :
    """Helper that builds tiny M2M100 (MaMaaa) configs and inputs for tests.

    NOTE(review): an automated rename damaged this class — every method
    parameter is called ``__a`` (duplicate-argument SyntaxError), and tuple
    targets such as ``__lowercase , __lowercase : str = ...`` are invalid
    annotated assignments whose original names (config, inputs_dict,
    outputs, ...) were lost. The comments below describe intent only; the
    original identifiers must be restored before this file can run.
    """

    def __init__( self : Union[str, Any] , __a : str , __a : Tuple=13 , __a : List[Any]=7 , __a : Any=True , __a : List[str]=False , __a : Optional[Any]=99 , __a : Tuple=16 , __a : int=2 , __a : Optional[Any]=4 , __a : int=4 , __a : Any="relu" , __a : Optional[int]=0.1 , __a : List[str]=0.1 , __a : Dict=0.0 , __a : List[str]=0.0 , __a : Union[str, Any]=20 , __a : str=2 , __a : str=1 , __a : Optional[int]=0 , ) -> Optional[int]:
        """Store the tiny hyper-parameters used to build test configs."""
        __lowercase : Any = parent
        __lowercase : Tuple = batch_size
        __lowercase : Any = seq_length
        __lowercase : Tuple = is_training
        __lowercase : Optional[Any] = use_labels
        __lowercase : Dict = vocab_size
        __lowercase : Optional[Any] = hidden_size
        __lowercase : Dict = num_hidden_layers
        __lowercase : Dict = num_attention_heads
        __lowercase : Dict = intermediate_size
        __lowercase : Union[str, Any] = hidden_act
        __lowercase : Union[str, Any] = hidden_dropout_prob
        __lowercase : Tuple = attention_probs_dropout_prob
        __lowercase : Tuple = encoder_layerdrop
        __lowercase : List[str] = decoder_layerdrop
        __lowercase : Any = max_position_embeddings
        __lowercase : Any = eos_token_id
        __lowercase : Dict = pad_token_id
        __lowercase : List[str] = bos_token_id

    def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
        """Create a config plus random, pad-free encoder/decoder input ids."""
        __lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __lowercase : str = self.eos_token_id  # Eos Token
        __lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        __lowercase : Tuple = input_ids.clamp(self.pad_token_id + 1 )
        __lowercase : Optional[int] = decoder_input_ids.clamp(self.pad_token_id + 1 )
        __lowercase : List[str] = self.get_config()
        __lowercase : str = prepare_mam_aaa_inputs_dict(__a , __a , __a )
        return config, inputs_dict

    def lowerCAmelCase ( self : str ) -> Dict:
        """Build a tiny MaMaaaConfig from the stored hyper-parameters."""
        return MaMaaaConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )

    def lowerCAmelCase ( self : Dict ) -> str:
        """Alias of prepare_config_and_inputs, used by the common test mixin."""
        __lowercase , __lowercase : int = self.prepare_config_and_inputs()
        return config, inputs_dict

    def lowerCAmelCase ( self : int , __a : str , __a : str ) -> int:
        """Check that decoding with cached past_key_values matches full decoding."""
        __lowercase : Optional[Any] = MaMaaaModel(config=__a ).get_decoder().to(__a ).eval()
        __lowercase : List[str] = inputs_dict["""input_ids"""]
        __lowercase : Dict = inputs_dict["""attention_mask"""]
        __lowercase : List[Any] = inputs_dict["""head_mask"""]
        # first forward pass
        __lowercase : List[str] = model(__a , attention_mask=__a , head_mask=__a , use_cache=__a )
        __lowercase , __lowercase : str = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        __lowercase : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        __lowercase : str = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        __lowercase : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
        __lowercase : Optional[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        __lowercase : Optional[int] = model(__a , attention_mask=__a )["""last_hidden_state"""]
        __lowercase : Union[str, Any] = model(__a , attention_mask=__a , past_key_values=__a )[
            """last_hidden_state"""
        ]
        # select random slice
        __lowercase : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        __lowercase : int = output_from_no_past[:, -3:, random_slice_idx].detach()
        __lowercase : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(__a , __a , atol=1E-2 ) )

    def lowerCAmelCase ( self : List[str] , __a : Tuple , __a : List[str] ) -> str:
        """Round-trip standalone encoder/decoder through save_pretrained and
        verify their outputs match the full model's."""
        __lowercase : Dict = MaMaaaModel(config=__a ).to(__a ).eval()
        __lowercase : Any = model(**__a )
        __lowercase : str = outputs.encoder_last_hidden_state
        __lowercase : Optional[Any] = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowercase : str = model.get_encoder()
            encoder.save_pretrained(__a )
            __lowercase : List[Any] = MaMaaaEncoder.from_pretrained(__a ).to(__a )
            __lowercase : Tuple = encoder(inputs_dict["""input_ids"""] , attention_mask=inputs_dict["""attention_mask"""] )[
                0
            ]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowercase : Tuple = model.get_decoder()
            decoder.save_pretrained(__a )
            __lowercase : Tuple = MaMaaaDecoder.from_pretrained(__a ).to(__a )
            __lowercase : Tuple = decoder(
                input_ids=inputs_dict["""decoder_input_ids"""] , attention_mask=inputs_dict["""decoder_attention_mask"""] , encoder_hidden_states=__a , encoder_attention_mask=inputs_dict["""attention_mask"""] , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class lowerCAmelCase ( __a , __a , __a , unittest.TestCase ):
    """Model-level test suite for M2M100 (MaMaaa).

    NOTE(review): an automated rename damaged this class — the three mixin
    bases are all the undefined name ``__a`` (presumably ModelTesterMixin,
    GenerationTesterMixin, PipelineTesterMixin per the imports above), the
    ``_A`` class attributes lost their original names (all_model_classes,
    all_generative_model_classes, pipeline_model_mapping, ...), and method
    parameters are duplicated. Comments describe intent only.
    """
    _A : int = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    _A : int = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    _A : Union[str, Any] = (
        {
            '''conversational''': MaMaaaForConditionalGeneration,
            '''feature-extraction''': MaMaaaModel,
            '''summarization''': MaMaaaForConditionalGeneration,
            '''text2text-generation''': MaMaaaForConditionalGeneration,
            '''translation''': MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    _A : Optional[int] = True
    _A : Union[str, Any] = True
    _A : Any = False
    _A : int = False

    def lowerCAmelCase ( self : str , __a : Union[str, Any] , __a : Optional[int] , __a : List[Any] , __a : Tuple , __a : Optional[int] ) -> Any:
        """Decide which pipeline tests to skip for this model."""
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True
        return False

    def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
        """setUp: build the model tester and the config tester."""
        __lowercase : int = MaMaaaModelTester(self )
        __lowercase : Union[str, Any] = ConfigTester(self , config_class=__a )

    def lowerCAmelCase ( self : Any ) -> str:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def lowerCAmelCase ( self : List[Any] ) -> Any:
        """Save/load round-trip keeps all keys for every model class."""
        __lowercase , __lowercase : Any = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            __lowercase : List[str] = model_class(__a )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(__a )
                __lowercase , __lowercase : str = model_class.from_pretrained(__a , output_loading_info=__a )
            self.assertEqual(info["""missing_keys"""] , [] )

    def lowerCAmelCase ( self : int ) -> List[Any]:
        """Past-key-values decoding matches full decoding (large inputs)."""
        __lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*__a )

    def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
        """Standalone encoder/decoder agree with the composite model."""
        __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*__a )

    def lowerCAmelCase ( self : int ) -> Any:
        """Forward pass works with inputs_embeds instead of input_ids."""
        __lowercase , __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            __lowercase : Union[str, Any] = model_class(__a )
            model.to(__a )
            model.eval()
            __lowercase : Any = copy.deepcopy(self._prepare_for_class(__a , __a ) )
            if not self.is_encoder_decoder:
                __lowercase : int = inputs["""input_ids"""]
                del inputs["input_ids"]
            else:
                __lowercase : Optional[int] = inputs["""input_ids"""]
                __lowercase : Optional[Any] = inputs.get("""decoder_input_ids""" , __a )
                del inputs["input_ids"]
                inputs.pop("""decoder_input_ids""" , __a )
            __lowercase : Union[str, Any] = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                __lowercase : Dict = wte(__a )
            else:
                __lowercase : str = wte(__a )
                __lowercase : Union[str, Any] = wte(__a )
            with torch.no_grad():
                model(**__a )[0]

    def lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
        """Generation runs under fp16 on CUDA without crashing."""
        __lowercase , __lowercase : str = self.model_tester.prepare_config_and_inputs()
        __lowercase : List[str] = input_dict["""input_ids"""]
        __lowercase : Optional[int] = input_ids.ne(1 ).to(__a )
        __lowercase : List[Any] = MaMaaaForConditionalGeneration(__a ).eval().to(__a )
        if torch_device == "cuda":
            model.half()
        model.generate(__a , attention_mask=__a )
        model.generate(num_beams=4 , do_sample=__a , early_stopping=__a , num_return_sequences=3 )
def _long_tensor(tok_lst):
    """Build a long tensor from a nested token list on the global test device.

    NOTE(review): the original passed the token list itself as ``device=``;
    fixed to use the imported ``torch_device``, and renamed to match the
    ``_long_tensor(...)`` call sites in the integration tests below.
    """
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
# Absolute tolerance used by the integration tests below when comparing logits.
lowerCamelCase : Dict = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class lowerCAmelCase ( unittest.TestCase ):
    """Slow integration tests against the facebook/m2m100_418M checkpoint.

    NOTE(review): local names were collapsed by an automated rename (all
    ``__lowercase``); restore the originals (model, input_ids, dct, ...)
    before running. Comments describe intent only.
    """

    @cached_property
    def lowerCAmelCase ( self : List[str] ) -> Tuple:
        """Tokenizer shared by the generation test below."""
        return MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" )

    def lowerCAmelCase ( self : Any ) -> Tuple:
        """Base model forward pass matches recorded hidden-state slice."""
        __lowercase : List[str] = MaMaaaModel.from_pretrained("""facebook/m2m100_418M""" ).to(__a )
        __lowercase : Union[str, Any] = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] )
        __lowercase : int = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] )
        __lowercase : int = prepare_mam_aaa_inputs_dict(model.config , __a , __a )
        with torch.no_grad():
            __lowercase : int = model(**__a )[0]
        __lowercase : int = torch.Size((1, 11, 1024) )
        self.assertEqual(output.shape , __a )
        # change to expected output here
        __lowercase : Dict = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=__a )
        self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=__a ) )

    def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
        """Conditional-generation head logits match recorded slice."""
        __lowercase : Tuple = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(__a )
        # change to intended input
        __lowercase : Any = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] )
        __lowercase : Union[str, Any] = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] )
        __lowercase : Tuple = prepare_mam_aaa_inputs_dict(model.config , __a , __a )
        with torch.no_grad():
            __lowercase : Optional[Any] = model(**__a )[0]
        __lowercase : Tuple = torch.Size((1, 11, model.config.vocab_size) )
        self.assertEqual(output.shape , __a )
        # change to expected output here
        __lowercase : Union[str, Any] = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=__a )
        self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=__a ) )

    def lowerCAmelCase ( self : Optional[int] ) -> int:
        """French->English beam-search generation reproduces reference text."""
        __lowercase : List[str] = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(__a )
        __lowercase : Tuple = MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" , src_lang="""fr""" , tgt_lang="""en""" )
        __lowercase : Dict = [
            """L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
            """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
            """Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"""
            """ Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"""
            """ l'ampleur de la surveillance américaine sur l'ensemble des communications en France.""",
        ]
        # The below article tests that we don't add any hypotheses outside of the top n_beams
        __lowercase : Union[str, Any] = tokenizer(__a , padding=__a , return_tensors="""pt""" )
        __lowercase : Dict = model.generate(
            input_ids=dct["""input_ids"""].to(__a ) , attention_mask=dct["""attention_mask"""].to(__a ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("""en""" ) , )
        __lowercase : Any = [
            """The NSA case highlights the total absence of intelligence debate""",
            """I think there are two levels of response from the French government.""",
            """When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."""
            """ Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"""
            """ communications in France.""",
        ]
        __lowercase : Dict = tokenizer.batch_decode(
            hypotheses_batch.tolist() , clean_up_tokenization_spaces=__a , skip_special_tokens=__a )
        # NOTE(review): the trailing "| 233 | 1 |" below looks like paste residue, not code — remove it.
        assert generated == expected_en | 233 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __A ( __lowercase ):
lowerCAmelCase_ : Tuple = ["image_processor", "tokenizer"]
lowerCAmelCase_ : Any = "LayoutLMv2ImageProcessor"
lowerCAmelCase_ : List[str] = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
    """Wrap a LayoutLMv2 image processor and a LayoutXLM tokenizer.

    Accepts the deprecated ``feature_extractor`` kwarg as a fallback for
    ``image_processor``. Raises ValueError when either component is missing.

    NOTE(review): the original duplicated its parameter names and passed an
    undefined name as the warning category; names restored from the body —
    verify against the upstream processor.
    """
    feature_extractor = None
    if "feature_extractor" in kwargs:
        warnings.warn(
            "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
            " instead.",
            FutureWarning,
        )
        feature_extractor = kwargs.pop("feature_extractor")

    # Deprecated alias: fall back to feature_extractor when no image_processor given.
    image_processor = image_processor if image_processor is not None else feature_extractor
    if image_processor is None:
        raise ValueError("You need to specify an `image_processor`.")
    if tokenizer is None:
        raise ValueError("You need to specify a `tokenizer`.")

    super().__init__(image_processor, tokenizer)
def __call__( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase_ : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , UpperCAmelCase_ : Union[List[List[int]], List[List[List[int]]]] = None , UpperCAmelCase_ : Optional[Union[List[int], List[List[int]]]] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase_ : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , **UpperCAmelCase_ : Dict , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
# first, apply the image processor
lowerCAmelCase : Any = self.image_processor(images=snake_case_ , return_tensors=snake_case_ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(snake_case_ , snake_case_ ):
lowerCAmelCase : Optional[int] = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCAmelCase : List[Any] = features['''words''']
lowerCAmelCase : str = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_token_type_ids=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
# add pixel values
lowerCAmelCase : List[str] = features.pop('pixel_values' )
if return_overflowing_tokens is True:
lowerCAmelCase : Optional[Any] = self.get_overflowing_images(snake_case_ , encoded_inputs['overflow_to_sample_mapping'] )
lowerCAmelCase : List[str] = images
return encoded_inputs
def lowercase__ ( self : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
lowerCAmelCase : Union[str, Any] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
f" {len(snake_case_ )} and {len(snake_case_ )}" )
return images_with_overflow
def lowercase__ ( self : Optional[Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : List[str] ):
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def lowercase__ ( self : Optional[int] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : str ):
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def lowercase__ ( self : Any ):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def lowercase__ ( self : int ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , snake_case_ , )
return self.image_processor_class
@property
def lowercase__ ( self : Tuple ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , snake_case_ , )
return self.image_processor
| 352 |
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    """Return the surface area (6 * a^2) of a cube with edge length ``side_length``.

    Raises:
        ValueError: if ``side_length`` is negative.
    """
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2
def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """Return the surface area 2(lb + bh + lh) of a cuboid.

    Raises:
        ValueError: if any dimension is negative.
    """
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius: float) -> float:
    """Return the surface area (4 * pi * r^2) of a sphere.

    Raises:
        ValueError: if ``radius`` is negative.
    """
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2
def surface_area_hemisphere(radius: float) -> float:
    """Return the total surface area (3 * pi * r^2: curved 2*pi*r^2 + flat pi*r^2) of a hemisphere.

    Raises:
        ValueError: if ``radius`` is negative.
    """
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2
def surface_area_cone(radius: float, height: float) -> float:
    """Return the total surface area pi*r*(r + slant) of a right circular cone.

    The slant height is sqrt(h^2 + r^2).

    Raises:
        ValueError: if ``radius`` or ``height`` is negative.
    """
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """Return the total surface area of a conical frustum with the two end radii.

    Total area = pi * (slant * (r1 + r2) + r1^2 + r2^2), where the slant height is
    sqrt(h^2 + (r1 - r2)^2). The original collapsed both radii into one name, which
    made the slant term degenerate to the height.

    Raises:
        ValueError: if any argument is negative.
    """
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder(radius: float, height: float) -> float:
    """Return the total surface area 2*pi*r*(h + r) of a right circular cylinder.

    Raises:
        ValueError: if ``radius`` or ``height`` is negative.
    """
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """Return the surface area 4 * pi^2 * R * r of a ring torus.

    ``torus_radius`` (R) is the distance from the torus center to the tube center;
    ``tube_radius`` (r) is the tube's own radius. The original squared a parameter
    instead of pi; fixed to the standard formula.

    Raises:
        ValueError: if either radius is negative, or if R < r (spindle/self-intersecting).
    """
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
    """Return the area (length * width) of a rectangle.

    Raises:
        ValueError: if ``length`` or ``width`` is negative.
    """
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width
def area_square(side_length: float) -> float:
    """Return the area (a^2) of a square.

    Raises:
        ValueError: if ``side_length`` is negative.
    """
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2
def area_triangle(base: float, height: float) -> float:
    """Return the area (base * height / 2) of a triangle from base and height.

    Raises:
        ValueError: if ``base`` or ``height`` is negative.
    """
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2
def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    """Return the area of a triangle from its three side lengths (Heron's formula).

    The original collapsed all three sides into one name, which made both the
    triangle-inequality check and Heron's formula degenerate.

    Raises:
        ValueError: if any side is negative, or the sides violate the triangle inequality.
    """
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area
def area_parallelogram(base: float, height: float) -> float:
    """Return the area (base * height) of a parallelogram.

    Raises:
        ValueError: if ``base`` or ``height`` is negative.
    """
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height
def area_trapezium(base1: float, base2: float, height: float) -> float:
    """Return the area ((b1 + b2) * h / 2) of a trapezium with parallel sides b1, b2.

    Raises:
        ValueError: if any argument is negative.
    """
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base1 + base2) * height
def area_circle(radius: float) -> float:
    """Return the area (pi * r^2) of a circle.

    Raises:
        ValueError: if ``radius`` is negative.
    """
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2
def area_ellipse(radius_x: float, radius_y: float) -> float:
    """Return the area (pi * a * b) of an ellipse with semi-axes a and b.

    Raises:
        ValueError: if either semi-axis is negative.
    """
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y
def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """Return the area (d1 * d2 / 2) of a rhombus from its diagonals.

    The original collapsed both diagonals into one name (d^2 / 2); fixed.

    Raises:
        ValueError: if either diagonal is negative.
    """
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon(sides: int, length: float) -> float:
    """Return the area of a regular polygon: (n * l^2) / (4 * tan(pi / n)).

    Fixes vs. original: the isinstance check compared the argument against itself,
    and an unreachable duplicated return line was removed.

    Raises:
        ValueError: if ``sides`` is not an integer >= 3, or ``length`` is negative.
    """
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \
length of a side"
        )
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)  # verbose so we can see methods missing tests

    print("[DEMO] Areas of various geometric shapes: \n")
    print(f"Rectangle: {area_rectangle(10, 20) = }")
    print(f"Square: {area_square(10) = }")
    print(f"Triangle: {area_triangle(10, 10) = }")
    print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
    print(f"Parallelogram: {area_parallelogram(10, 20) = }")
    print(f"Rhombus: {area_rhombus(10, 20) = }")
    print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
    print(f"Circle: {area_circle(20) = }")
    print(f"Ellipse: {area_ellipse(10, 20) = }")
    print("\nSurface Areas of various geometric shapes: \n")
    print(f"Cube: {surface_area_cube(20) = }")
    print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
    print(f"Sphere: {surface_area_sphere(20) = }")
    print(f"Hemisphere: {surface_area_hemisphere(20) = }")
    print(f"Cone: {surface_area_cone(10, 20) = }")
    print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
    print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
    print(f"Torus: {surface_area_torus(20, 10) = }")
    print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
    print(f"Square: {area_reg_polygon(4, 10) = }")
    # typo fix: "Reqular" -> "Regular"
    print(f"Regular Pentagon: {area_reg_polygon(5, 10) = }")
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    """Scheduler tests for `EulerDiscreteScheduler`.

    Restored from the garbled original: the base class is `SchedulerCommonTest`
    (imported above), locals were all named `a_` while bodies referenced the real
    names, and every method shared one name. Expected numeric values are kept
    verbatim from the original asserts.
    """

    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        # Default config matching the original literals; kwargs override.
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    """Return the simple interest accrued: principal * rate * days.

    Raises:
        ValueError: if days_between_payments or principal is not positive, or the rate is negative.
    """
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments
def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    """Return compound interest accrued: principal * ((1 + rate)^n - 1).

    Raises:
        ValueError: if the period count or principal is not positive, or the rate is negative.
    """
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    """Return interest from an APR compounded daily over ``number_of_years`` years.

    Delegates to ``compound_interest`` with a daily rate (APR / 365) over
    365 periods per year.

    Raises:
        ValueError: if the year count or principal is not positive, or the rate is negative.
    """
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    """Builds a `BlipTextConfig` plus dummy inputs for the TFBlipTextModel tests.

    Restored from the garbled original: the test class below instantiates
    `BlipTextModelTester(self)`, which fixes this class's intended name, and the
    bodies referenced the real attribute names.
    """

    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask-as-tf-tensor)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            # Make each row a contiguous prefix of 1s followed by 0s at a random cut.
            start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    """Common-model-test harness for `TFBlipTextModel`."""

    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    # NOTE(review): three boolean flags restored from the garbled original — the
    # original names were lost; confirm against upstream (test_pruning/test_head_masking/test_onnx).
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
'''simple docstring'''
def multiplicative_persistence(num: int) -> int:
    """Return the multiplicative persistence of ``num``: the number of times its
    digits must be multiplied together until a single digit remains.

    Raises:
        ValueError: if ``num`` is not an integer or is negative.
    """
    # The original shadowed the function name with the parameter and did
    # `isinstance(num, num)` — fixed to check against `int`.
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for digit in numbers:
            total *= digit
        num_string = str(total)
        steps += 1
    return steps
def additive_persistence(num: int) -> int:
    """Return the additive persistence of ``num``: the number of times its digits
    must be summed until a single digit remains.

    Raises:
        ValueError: if ``num`` is not an integer or is negative.
    """
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for digit in numbers:
            total += digit
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
    import doctest

    doctest.testmod()
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    """Return a representative MLP linear layer of *model* for quantization checks.

    GPT-2 names its first MLP projection `c_fc`; otherwise the bloom-style
    `dense_4h_to_h` is used. (The original referenced the undefined name `model`
    and the garbled attribute `dense_ah_to_h` — restored; confirm attribute name
    against the bloom modeling code.)
    """
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class __lowercase (nn.Module ):
def __init__( self , A_ , A_ ) ->Dict:
'''simple docstring'''
super().__init__()
__lowerCAmelCase : str = module
__lowerCAmelCase : int = nn.Sequential(
nn.Linear(module.in_features , A_ , bias=A_ ) , nn.Linear(A_ , module.out_features , bias=A_ ) , )
__lowerCAmelCase : Optional[int] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=A_ )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def UpperCamelCase__ ( self , A_ , *A_ , **A_ ) ->List[str]:
'''simple docstring'''
return self.module(A_ , *A_ , **A_ ) + self.adapter(A_ )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    """Shared constants and tokenizer setup for the 4-bit quantization tests.

    Attribute names restored from the subclasses' own references
    (`self.EXPECTED_RELATIVE_DIFFERENCE`, `self.EXPECTED_OUTPUTS`, ...).
    """

    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574
    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Tokenizer shared by all subclasses; models are loaded in subclass setUp.
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    """End-to-end checks for 4-bit (bitsandbytes) quantized causal-LM loading,
    generation quality, serialization, and dtype/device restrictions.

    Restored from the garbled original: locals/kwargs such as `load_in_abit`,
    `torch.floataa`, `Paramsabit` are mapped back to `load_in_4bit`,
    `torch.float16/float32`, `Params4bit`.
    """

    def setUp(self):
        super().setUp()

        # fp16 reference model and its 4-bit quantized counterpart
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        # Free GPU memory between tests.
        del self.model_fp16
        del self.model_4bit
        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config
        self.assertTrue(hasattr(config, "quantization_config"))
        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()
        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_4bit = True
        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        # NOTE(review): exception type restored as NotImplementedError — confirm
        # against the transformers version in use (original type was garbled).
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        bnb_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=bnb_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype``
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        # NOTE(review): `AutoModelForSeqaSeqLM` keeps this file's (garbled) import
        # name so the file stays self-consistent; upstream is AutoModelForSeq2SeqLM.
        model = AutoModelForSeqaSeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    """4-bit loading/inference checks for T5-style models, with and without the
    `_keep_in_fp32_modules` mechanism."""

    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        # Temporarily disable the fp32-keep list; restored at the end.
        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class __lowercase (_UpperCAmelCase ):
# Checks that 8-bit quantization replaces only the transformer body while task heads
# (lm_head / score) stay as regular nn.Parameter weights, across several Auto* classes.
def UpperCamelCase__ ( self ) ->Union[str, Any]:
'''Load one model per Auto* class in 8-bit for the checks below.'''
super().setUp()
# model_name
__lowerCAmelCase : str = '''bigscience/bloom-560m'''
__lowerCAmelCase : Any = '''t5-small'''
# Different types of model
__lowerCAmelCase : int = AutoModel.from_pretrained(self.model_name , load_in_abit=A_ , device_map='''auto''' )
# Sequence classification model
__lowerCAmelCase : int = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=A_ , device_map='''auto''' )
# CausalLM model
__lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A_ , device_map='''auto''' )
# Seq2seq model
__lowerCAmelCase : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=A_ , device_map='''auto''' )
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''Drop all loaded models and release cached GPU memory.'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''Assert quantized body weights are Params4bit/Int8Params while heads remain nn.Parameter.'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __lowercase (_UpperCAmelCase ):
# End-to-end check that a 4-bit quantized model works through the `pipeline` API.
def UpperCamelCase__ ( self ) ->Dict:
'''Inherit the shared fixtures (model name, expected outputs) from the base test case.'''
super().setUp()
def UpperCamelCase__ ( self ) ->Optional[int]:
'''Drop the pipeline and release cached GPU memory.'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ) ->List[str]:
'''Build a 4-bit text-generation pipeline and validate the generated text.'''
__lowerCAmelCase : Optional[int] = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__lowerCAmelCase : Any = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __lowercase (_UpperCAmelCase ):
# Verifies quantized loading with device_map='balanced' spreads the model over two GPUs
# and that generation still works on the sharded model.
def UpperCamelCase__ ( self ) ->List[str]:
'''Inherit the shared fixtures from the base test case.'''
super().setUp()
def UpperCamelCase__ ( self ) ->Tuple:
'''Load with a balanced device map, check the {0, 1} placement, then generate.'''
__lowerCAmelCase : Optional[int] = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=A_ , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__lowerCAmelCase : List[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
__lowerCAmelCase : Any = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=A_ ) , self.EXPECTED_OUTPUTS )
class __lowercase (_UpperCAmelCase ):
# Trains small LoRA adapters on top of a frozen quantized OPT model and checks
# that gradients flow into the adapters only (not into the frozen embeddings).
def UpperCamelCase__ ( self ) ->int:
'''Use facebook/opt-350m as the base model for the adapter-training check.'''
__lowerCAmelCase : int = '''facebook/opt-350m'''
super().setUp()
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''Freeze the quantized model, attach LoRA adapters, run one backward pass, inspect grads.'''
# Training on int8 weights requires bitsandbytes >= 0.37.0; skip silently otherwise.
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
__lowerCAmelCase : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
# NOTE(review): upstream this is `param.requires_grad = False`; the target looks mangled — verify.
__lowerCAmelCase : List[Any] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__lowerCAmelCase : Dict = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(A_ ) ):
# NOTE(review): upstream these wrap `module.q_proj`/`k_proj`/`v_proj` in place — verify targets.
__lowerCAmelCase : List[Any] = LoRALayer(module.q_proj , rank=16 )
__lowerCAmelCase : List[Any] = LoRALayer(module.k_proj , rank=16 )
__lowerCAmelCase : int = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__lowerCAmelCase : str = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__lowerCAmelCase : Optional[Any] = model.forward(**A_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(A_ , A_ ):
# Adapter weights must have received a non-trivial gradient.
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(A_ , nn.Embedding ):
# Frozen embeddings must stay gradient-free.
self.assertTrue(module.weight.grad is None )
class __lowercase (_UpperCAmelCase ):
# Variant of the base quantization test parameterized for gpt2-xl.
# NOTE(review): both class attributes are assigned to the same mangled name `_UpperCamelCase`;
# upstream these are distinct fields (model name and an expected relative difference) — verify.
_UpperCamelCase = """gpt2-xl"""
_UpperCamelCase = 3.3191854854152187
| 275 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def _lowercase ( lowercase__ ):
    """Add atom14/atom37 interconversion indices and existence masks to a protein dict.

    Mirrors the AlphaFold/OpenFold ``make_atom14_masks`` transform: for every residue
    type it builds index tables mapping the compact 14-atom layout to the full 37-atom
    layout (and back), gathers them per residue via ``protein['aatype']``, and stores
    the results back into the dict, which is returned mutated.

    Fixes applied vs. the previous revision: the identifier mangling had collapsed the
    two distinct index tables into a single list and discarded every assignment target,
    so the function raised ``NameError``. Distinct locals and explicit dict writes are
    restored here.

    :param lowercase__: dict with at least an integer ``'aatype'`` tensor of shape
        ``[*, num_res]``; extended in place.  (Parameter name kept for compatibility.)
    :return: the same dict with the four derived entries added.
    """
    protein = lowercase__
    restype_atomaa_to_atomaa_list = []  # per restype: atom14 slot -> atom37 index
    restype_atomaa_to_atomaa_rev_list = []  # per restype: atom37 slot -> atom14 index
    restype_atomaa_mask_list = []  # per restype: which of the 14 slots exist
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
        restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idxaa = {name: i for i, name in enumerate(atom_names)}
        restype_atomaa_to_atomaa_rev_list.append(
            [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types])
        restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atomaa_to_atomaa_list.append([0] * 14)
    restype_atomaa_to_atomaa_rev_list.append([0] * 37)
    restype_atomaa_mask_list.append([0.0] * 14)
    device = protein["aatype"].device
    restype_atomaa_to_atomaa = torch.tensor(restype_atomaa_to_atomaa_list, dtype=torch.int32, device=device)
    restype_atomaa_to_atomaa_rev = torch.tensor(restype_atomaa_to_atomaa_rev_list, dtype=torch.int32, device=device)
    restype_atomaa_mask = torch.tensor(restype_atomaa_mask_list, dtype=torch.float32, device=device)
    protein_aatype = protein["aatype"].to(torch.long)
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atomaa_to_atomaa = restype_atomaa_to_atomaa[protein_aatype]
    residx_atomaa_mask = restype_atomaa_mask[protein_aatype]
    # NOTE(review): output key names reconstructed from the upstream transform — verify.
    protein["atom14_atom_exists"] = residx_atomaa_mask
    protein["residx_atom14_to_atom37"] = residx_atomaa_to_atomaa.long()
    # create the gather indices for mapping back
    residx_atomaa_to_atomaa_rev = restype_atomaa_to_atomaa_rev[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atomaa_to_atomaa_rev.long()
    # create the corresponding atom37 existence mask (21 restypes x 37 atom types)
    restype_atomaa_mask_rev = torch.zeros([21, 37], dtype=torch.float32, device=device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_atoa[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atomaa_mask_rev[restype, atom_type] = 1
    residx_atomaa_mask_rev = restype_atomaa_mask_rev[protein_aatype]
    protein["atom37_atom_exists"] = residx_atomaa_mask_rev
    return protein
def _lowercase ( lowercase__ ):
    """NumPy front-end for the atom14/atom37 mask builder.

    Converts every ``np.ndarray`` leaf of the input dict to a torch tensor on the
    ``'aatype'`` device, runs the torch mask builder, and converts the result back
    to NumPy arrays.

    Fixes applied vs. the previous revision: the tensor-mapped batch was discarded
    (raw input was forwarded) and the returned name ``out`` was never bound.

    :param lowercase__: dict of ``np.ndarray`` leaves, including ``'aatype'``.
    :return: dict of ``np.ndarray`` leaves with the derived mask entries added.
    """
    batch = lowercase__
    # Promote every ndarray leaf to a tensor so the torch transform can run on it.
    out = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    # NOTE(review): `make_atomaa_masks` is the mask builder defined above; its def was
    # renamed by the identifier mangling in this file — confirm the reference resolves.
    out = tensor_tree_map(lambda t: np.array(t), make_atomaa_masks(out))
    return out
| 275 | 1 |
import operator as op
# Shared constants for checkpointing, launcher configuration and FSDP/DeepSpeed options.
# NOTE(review): all targets were mangled to `a__`; upstream each line binds a distinct
# named constant (SCALER_NAME, MODEL_NAME, WEIGHTS_NAME, ...) — verify before relying on them.
# Checkpoint file names.
a__: List[str] = 'scaler.pt'
a__: List[Any] = 'pytorch_model'
a__: Optional[Any] = 'random_states'
a__: int = 'optimizer'
a__: Any = 'scheduler'
a__: str = 'pytorch_model.bin'
a__: Union[str, Any] = 'pytorch_model.bin.index.json'
a__: Any = 'model.safetensors'
a__: Any = 'model.safetensors.index.json'
# Minimum supported versions for SageMaker runs.
a__: str = '1.10.2'
a__: List[str] = 'py38'
a__: List[str] = '4.17.0'
a__: Dict = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge']
# FSDP configuration choices.
a__: List[Any] = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2']
a__: Optional[int] = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP']
a__: int = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH']
a__: str = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT']
# torch.compile / DeepSpeed launcher options.
a__: List[str] = '2.0.1'
a__: Dict = ['pdsh', 'standard', 'openmpi', 'mvapich']
a__: List[Any] = ['default', 'reduce-overhead', 'max-autotune']
# Comparison operators used when checking version constraints.
a__: Tuple = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
a__: Optional[Any] = [
'nnodes',
'nproc_per_node',
'rdzv_backend',
'rdzv_endpoint',
'rdzv_id',
'rdzv_conf',
'standalone',
'max_restarts',
'monitor_interval',
'start_method',
'role',
'module',
'm',
'no_python',
'run_path',
'log_dir',
'r',
'redirects',
't',
'tee',
'node_rank',
'master_addr',
'master_port',
]
# Distributed modes that use multiple CUDA vs. XPU devices.
a__: str = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM']
a__: Dict = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
| 39 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
# Output container for the VQ encoder (upstream: `VQEncoderOutput` holding `latents`).
# NOTE(review): the field's annotation was mangled to the literal `42`; upstream this
# is a tensor-typed `latents` field — verify.
__SCREAMING_SNAKE_CASE = 42
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , UpperCamelCase__ ):
# VQ-VAE model: Encoder -> quant conv -> VectorQuantizer -> post-quant conv -> Decoder.
# NOTE(review): every `__init__` parameter shares the mangled name `__lowerCamelCase`,
# which is a SyntaxError (duplicate argument); upstream these are in_channels,
# out_channels, down_block_types, up_block_types, block_out_channels, layers_per_block,
# act_fn, latent_channels, sample_size, num_vq_embeddings, norm_num_groups, vq_embed_dim,
# scaling_factor, norm_type — verify and restore.
@register_to_config
def __init__( self,__lowerCamelCase = 3,__lowerCamelCase = 3,__lowerCamelCase = ("DownEncoderBlock2D",),__lowerCamelCase = ("UpDecoderBlock2D",),__lowerCamelCase = (64,),__lowerCamelCase = 1,__lowerCamelCase = "silu",__lowerCamelCase = 3,__lowerCamelCase = 32,__lowerCamelCase = 256,__lowerCamelCase = 32,__lowerCamelCase = None,__lowerCamelCase = 0.18215,__lowerCamelCase = "group",):
super().__init__()
# pass init params to Encoder
A__ = Encoder(
in_channels=__lowerCamelCase,out_channels=__lowerCamelCase,down_block_types=__lowerCamelCase,block_out_channels=__lowerCamelCase,layers_per_block=__lowerCamelCase,act_fn=__lowerCamelCase,norm_num_groups=__lowerCamelCase,double_z=__lowerCamelCase,)
A__ = vq_embed_dim if vq_embed_dim is not None else latent_channels
# 1x1 convs translate between latent channels and the quantizer embedding dim.
A__ = nn.Convad(__lowerCamelCase,__lowerCamelCase,1 )
A__ = VectorQuantizer(__lowerCamelCase,__lowerCamelCase,beta=0.25,remap=__lowerCamelCase,sane_index_shape=__lowerCamelCase )
A__ = nn.Convad(__lowerCamelCase,__lowerCamelCase,1 )
# pass init params to Decoder
A__ = Decoder(
in_channels=__lowerCamelCase,out_channels=__lowerCamelCase,up_block_types=__lowerCamelCase,block_out_channels=__lowerCamelCase,layers_per_block=__lowerCamelCase,act_fn=__lowerCamelCase,norm_num_groups=__lowerCamelCase,norm_type=__lowerCamelCase,)
@apply_forward_hook
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = True ):
# Encode a sample to (pre-quantization) latents; returns a tuple or VQEncoderOutput.
A__ = self.encoder(__lowerCamelCase )
A__ = self.quant_conv(__lowerCamelCase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=__lowerCamelCase )
@apply_forward_hook
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = False,__lowerCamelCase = True ):
# Decode latents back to a sample, optionally skipping the quantization step.
# also go through quantization layer
if not force_not_quantize:
A__ , A__ , A__ = self.quantize(__lowerCamelCase )
else:
A__ = h
A__ = self.post_quant_conv(__lowerCamelCase )
# Spatial norm decoders additionally receive the quantized latents as conditioning.
A__ = self.decoder(__lowerCamelCase,quant if self.config.norm_type == '''spatial''' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = True ):
# Full forward pass: encode then decode (autoencoding round trip).
A__ = sample
A__ = self.encode(__lowerCamelCase ).latents
A__ = self.decode(__lowerCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__lowerCamelCase )
| 39 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
# Module logger and the map from checkpoint names to their hosted config URLs.
lowerCAmelCase__ : str = logging.get_logger(__name__)
lowerCAmelCase__ : int = {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class snake_case ( __lowerCamelCase ):
    """Configuration for BlenderbotSmall models.

    Holds the hyper-parameters of the encoder/decoder transformer and forwards the
    special-token ids to the base config class.

    Fixes applied vs. the previous revision: every ``__init__`` parameter shared the
    mangled name ``lowerCamelCase__`` (a SyntaxError — duplicate argument), and the
    three class attributes all bound the same name ``snake_case__``.  Parameter and
    attribute names are restored from the names the body itself reads and from the
    values visible in the original (defaults are byte-identical).
    """

    model_type = 'blenderbot-small'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__( self ,vocab_size=50_265 ,max_position_embeddings=512 ,encoder_layers=8 ,encoder_ffn_dim=2_048 ,encoder_attention_heads=16 ,decoder_layers=8 ,decoder_ffn_dim=2_048 ,decoder_attention_heads=16 ,encoder_layerdrop=0.0 ,decoder_layerdrop=0.0 ,use_cache=True ,is_encoder_decoder=True ,activation_function="gelu" ,d_model=512 ,dropout=0.1 ,attention_dropout=0.0 ,activation_dropout=0.0 ,init_std=0.0_2 ,decoder_start_token_id=1 ,scale_embedding=False ,pad_token_id=0 ,bos_token_id=1 ,eos_token_id=2 ,forced_eos_token_id=2 ,**kwargs ,):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        # ONNX export and layerdrop logic index layers by this count.
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,is_encoder_decoder=is_encoder_decoder ,decoder_start_token_id=decoder_start_token_id ,forced_eos_token_id=forced_eos_token_id ,**kwargs ,)
class snake_case ( __lowerCamelCase ):
"""ONNX export configuration for BlenderbotSmall (seq2seq-with-past variant).

NOTE(review): identifier mangling collapsed distinct method names into
`__lowerCAmelCase` and all locals into `UpperCAmelCase__` (including tuple
unpacks to the *same* name twice, which discards the first element), so the
bodies below read names that are never bound — verify against the upstream
OnnxSeq2SeqConfigWithPast implementation before trusting any of this.
"""
@property
def __lowerCAmelCase ( self : Union[str, Any] ):
# Declares dynamic input axes per task; adds past_key_values axes when use_past is set.
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase__ = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
UpperCAmelCase__ = {0: 'batch'}
UpperCAmelCase__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
UpperCAmelCase__ = {0: 'batch', 1: 'decoder_sequence'}
UpperCAmelCase__ = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase_ ,direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
UpperCAmelCase__ = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
UpperCAmelCase__ , UpperCAmelCase__ = self.num_layers
for i in range(UpperCamelCase_ ):
UpperCAmelCase__ = {0: 'batch', 2: 'past_sequence + sequence'}
UpperCAmelCase__ = {0: 'batch', 2: 'past_sequence + sequence'}
else:
UpperCAmelCase__ = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def __lowerCAmelCase ( self : Union[str, Any] ):
# Declares dynamic output axes; extends the parent's outputs with present-state axes.
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase__ = super().outputs
else:
UpperCAmelCase__ = super(UpperCamelCase_ ,self ).outputs
if self.use_past:
UpperCAmelCase__ , UpperCAmelCase__ = self.num_layers
for i in range(UpperCamelCase_ ):
UpperCAmelCase__ = {0: 'batch', 2: 'past_sequence + sequence'}
UpperCAmelCase__ = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : PreTrainedTokenizer ,lowerCamelCase__ : int = -1 ,lowerCamelCase__ : int = -1 ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Optional[TensorType] = None ,):
# Builds dummy encoder+decoder inputs (and zeroed past_key_values) for seq2seq export.
UpperCAmelCase__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ )
# Generate decoder inputs
UpperCAmelCase__ = seq_length if not self.use_past else 1
UpperCAmelCase__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ )
UpperCAmelCase__ = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
UpperCAmelCase__ = dict(**UpperCamelCase_ ,**UpperCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
UpperCAmelCase__ , UpperCAmelCase__ = common_inputs['input_ids'].shape
UpperCAmelCase__ = common_inputs['decoder_input_ids'].shape[1]
UpperCAmelCase__ , UpperCAmelCase__ = self.num_attention_heads
# Per-layer key/value cache shapes for encoder and decoder attention.
UpperCAmelCase__ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase__ = decoder_seq_length + 3
UpperCAmelCase__ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCAmelCase__ = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(UpperCamelCase_ ,UpperCamelCase_ )] ,dim=1 )
UpperCAmelCase__ = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
UpperCAmelCase__ , UpperCAmelCase__ = self.num_layers
UpperCAmelCase__ = min(UpperCamelCase_ ,UpperCamelCase_ )
UpperCAmelCase__ = max(UpperCamelCase_ ,UpperCamelCase_ ) - min_num_layers
UpperCAmelCase__ = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(UpperCamelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase_ ),
torch.zeros(UpperCamelCase_ ),
torch.zeros(UpperCamelCase_ ),
torch.zeros(UpperCamelCase_ ),
) )
# TODO: test this.
UpperCAmelCase__ = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(UpperCamelCase_ ,UpperCamelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ )) )
return common_inputs
def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : PreTrainedTokenizer ,lowerCamelCase__ : int = -1 ,lowerCamelCase__ : int = -1 ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Optional[TensorType] = None ,):
# Builds dummy causal-LM inputs, extending the attention mask to cover the cache length.
UpperCAmelCase__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
UpperCAmelCase__ , UpperCAmelCase__ = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
UpperCAmelCase__ = seqlen + 2
UpperCAmelCase__ , UpperCAmelCase__ = self.num_layers
UpperCAmelCase__ , UpperCAmelCase__ = self.num_attention_heads
UpperCAmelCase__ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase__ = common_inputs['attention_mask'].dtype
UpperCAmelCase__ = torch.cat(
[common_inputs['attention_mask'], torch.ones(UpperCamelCase_ ,UpperCamelCase_ ,dtype=UpperCamelCase_ )] ,dim=1 )
UpperCAmelCase__ = [
(torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ )) for _ in range(UpperCamelCase_ )
]
return common_inputs
def __lowerCAmelCase ( self : Any ,lowerCamelCase__ : PreTrainedTokenizer ,lowerCamelCase__ : int = -1 ,lowerCamelCase__ : int = -1 ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Optional[TensorType] = None ,):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase__ = compute_effective_axis_dimension(
UpperCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase__ = tokenizer.num_special_tokens_to_add(UpperCamelCase_ )
UpperCAmelCase__ = compute_effective_axis_dimension(
UpperCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=UpperCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase__ = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
UpperCAmelCase__ = dict(tokenizer(UpperCamelCase_ ,return_tensors=UpperCamelCase_ ) )
return common_inputs
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : PreTrainedTokenizer ,lowerCamelCase__ : int = -1 ,lowerCamelCase__ : int = -1 ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Optional[TensorType] = None ,):
# Dispatches dummy-input generation to the task-specific helper above.
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase__ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCamelCase_ ,batch_size=UpperCamelCase_ ,seq_length=UpperCamelCase_ ,is_pair=UpperCamelCase_ ,framework=UpperCamelCase_ )
elif self.task == "causal-lm":
UpperCAmelCase__ = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase_ ,batch_size=UpperCamelCase_ ,seq_length=UpperCamelCase_ ,is_pair=UpperCamelCase_ ,framework=UpperCamelCase_ )
else:
UpperCAmelCase__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase_ ,batch_size=UpperCamelCase_ ,seq_length=UpperCamelCase_ ,is_pair=UpperCamelCase_ ,framework=UpperCamelCase_ )
return common_inputs
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : List[str] ):
# Flattens nested past_key_values, choosing the seq2seq or causal-LM layout by task.
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase__ = super()._flatten_past_key_values_(UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ )
else:
UpperCAmelCase__ = super(UpperCamelCase_ ,self )._flatten_past_key_values_(
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ )
| 98 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
# Nightly CI reporter: parses pytest JSON-report `*.log` files, tabulates failures,
# and posts a summary (plus per-file failure threads) to Slack.
# NOTE(review): nearly every assignment target was mangled to `UpperCAmelCase_`; the
# names the later code reads (hf_table_format, group_info, failed, test, duration,
# message, payload, ts, ...) are what these lines bind upstream — verify each binding.
# Markdown-style table format used for Slack code blocks.
UpperCAmelCase_ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
UpperCAmelCase_ = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': f"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'emoji': True,
},
}
]
UpperCAmelCase_ = 0
# Collect per-log failure counts and [test, duration, suite] rows from every *.log file.
for log in Path().glob('*.log'):
UpperCAmelCase_ = 0
with open(log, 'r') as f:
for line in f:
UpperCAmelCase_ = json.loads(line)
if line.get('nodeid', '') != "":
UpperCAmelCase_ = line['nodeid']
if line.get('duration', None) is not None:
UpperCAmelCase_ = f"""{line["duration"]:.4f}"""
if line.get('outcome', '') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('_')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
UpperCAmelCase_ = []
# Logs are consumed; remove them so reruns start clean.
log.unlink()
UpperCAmelCase_ = ''
UpperCAmelCase_ = []
# Build the human-readable failure summary, grouped per source file.
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
UpperCAmelCase_ = []
UpperCAmelCase_ = {}
for test in failed_tests:
UpperCAmelCase_ = test[0].split('::')
UpperCAmelCase_ = data[0].split('/')[-1]
if data[0] not in filesafailed:
UpperCAmelCase_ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
UpperCAmelCase_ = [test[0] for test in failed_table]
UpperCAmelCase_ = list(set(files))
# Count number of instances in failed_tests
UpperCAmelCase_ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
UpperCAmelCase_ = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
# Slack section blocks are capped at 3000 chars; truncate with a pointer to the full report.
if len(message) > 3_000:
UpperCAmelCase_ = 'Too many failed tests, please see the full report in the Action results.'
UpperCAmelCase_ = len(err) + 10
UpperCAmelCase_ = message[: 3_000 - offset] + f"""\n...\n```\n{err}"""
print(f"""### {message}""")
else:
UpperCAmelCase_ = 'No failed tests! 🤗'
print(f"""## {message}""")
payload.append(no_error_payload)
# Only post to Slack when running inside CI (TEST_TYPE is set by the workflow).
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
UpperCAmelCase_ = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
UpperCAmelCase_ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
UpperCAmelCase_ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': f"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
UpperCAmelCase_ = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': f"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
UpperCAmelCase_ = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
UpperCAmelCase_ = response.data['ts']
# Post one threaded reply per failing file with its class/test table.
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
UpperCAmelCase_ = ''
for i, row in enumerate(test_failures):
if row[0] != test_class:
UpperCAmelCase_ = row[0]
else:
UpperCAmelCase_ = ''
UpperCAmelCase_ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
| 12 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the map from LUKE checkpoint names to their hosted config URLs.
A__ : Dict = logging.get_logger(__name__)
A__ : int = {
'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class lowercase__ ( snake_case__ ):
    """Configuration for LUKE (entity-aware language model) models.

    Stores the transformer hyper-parameters plus the entity-embedding settings and
    forwards the special-token ids to the base config class.

    Fixes applied vs. the previous revision: every ``__init__`` parameter shared the
    mangled name ``snake_case__`` (a SyntaxError — duplicate argument) and each
    ``self.<attr> = <param>`` assignment had its target discarded.  Names are restored
    from the attributes the original body reads; default values are byte-identical.
    """

    _UpperCAmelCase :List[str] = "luke"  # model_type identifier used by AutoConfig

    def __init__( self , vocab_size=5_0267 , entity_vocab_size=50_0000 , hidden_size=768 , entity_emb_size=256 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , use_entity_aware_attention=True , classifier_dropout=None , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        # Entity embeddings may use a smaller dimension and are projected up to hidden_size.
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Toggles LUKE's entity-aware query projections in self-attention.
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 209 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
A__ : List[str] = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowercase__ ( datasets.BuilderConfig ):
# BuilderConfig for the Parquet loader (upstream: ParquetConfig).
# NOTE(review): the three fields are all bound to the mangled name `_UpperCAmelCase`;
# upstream they are `batch_size`, `columns`, and `features` — verify.
_UpperCAmelCase :int = 10000
_UpperCAmelCase :Optional[List[str]] = None
_UpperCAmelCase :Optional[datasets.Features] = None
class lowercase__ ( datasets.ArrowBasedBuilder ):
# Arrow-based builder that streams Parquet files into datasets tables.
# NOTE(review): local/assignment targets were mangled to `lowerCamelCase_`; several
# names read below (files, data_files, splits, parquet_file, pa_table) are what
# those lines bind upstream — verify each binding.
_UpperCAmelCase :Optional[int] = ParquetConfig
def UpperCAmelCase__ ( self : Optional[int] ):
# Dataset metadata: features come from the config (may be None until inferred).
return datasets.DatasetInfo(features=self.config.features )
def UpperCAmelCase__ ( self : List[Any] , snake_case__ : List[Any] ):
# Resolve data_files into split generators; infer features from the first file's schema.
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
lowerCamelCase_ : str =dl_manager.download_and_extract(self.config.data_files )
if isinstance(snake_case__ , (str, list, tuple) ):
lowerCamelCase_ : Dict =data_files
if isinstance(snake_case__ , snake_case__ ):
lowerCamelCase_ : List[Any] =[files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCamelCase_ : Any =[dl_manager.iter_files(snake_case__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
lowerCamelCase_ : Optional[int] =[]
for split_name, files in data_files.items():
if isinstance(snake_case__ , snake_case__ ):
lowerCamelCase_ : Optional[int] =[files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCamelCase_ : int =[dl_manager.iter_files(snake_case__ ) for file in files]
# Infer features is they are stoed in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(snake_case__ ):
with open(snake_case__ , "rb" ) as f:
lowerCamelCase_ : List[Any] =datasets.Features.from_arrow_schema(pq.read_schema(snake_case__ ) )
break
splits.append(datasets.SplitGenerator(name=snake_case__ , gen_kwargs={"files": files} ) )
return splits
def UpperCAmelCase__ ( self : int , snake_case__ : pa.Table ):
# Cast a raw Arrow table to the declared features' schema (order-insensitive).
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
lowerCamelCase_ : List[str] =table_cast(snake_case__ , self.info.features.arrow_schema )
return pa_table
def UpperCAmelCase__ ( self : int , snake_case__ : Optional[Any] ):
# Yield (key, table) pairs batch-by-batch from each Parquet file.
lowerCamelCase_ : Tuple =self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
# Selected columns must exactly match the declared features.
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(snake_case__ ) ):
with open(snake_case__ , "rb" ) as f:
lowerCamelCase_ : List[str] =pq.ParquetFile(snake_case__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
lowerCamelCase_ : Union[str, Any] =pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"""{file_idx}_{batch_idx}""", self._cast_table(snake_case__ )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(snake_case__ )}: {e}""" )
raise
| 209 | 1 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    # CLI for converting an original ControlNet checkpoint into the diffusers
    # format.  Fixed in this revision: the parser, the parsed args and the
    # converted model were each bound to `__A` while the statements below read
    # `parser` / `args` / `controlnet`, and the bool parser was defined under a
    # name that never matched its `type=parse_bool` use site — every one of
    # those was a NameError at runtime.  Also fixed the "Stable Siffusion"
    # typo in the `--image_size` help text.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--original_config_file",
        type=str,
        required=True,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--image_size",
        default=5_12,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    def parse_bool(string):
        """argparse ``type=`` hook mapping the literal strings "True"/"False" to booleans.

        Raises:
            ValueError: for any other input, so argparse reports a usage error.
        """
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")

    parser.add_argument(
        "--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
    )
    parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)

    args = parser.parse_args()

    # Convert the original checkpoint into a diffusers ControlNet model.
    controlnet = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )
    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 90 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCamelCase ( lowercase__ ):
    '''Unit tests for `DDPMScheduler` (noise schedules, variance, stepping).

    NOTE(review): this revision looks machine-rewritten — every test method
    shares the name ``__lowerCAmelCase`` (later defs shadow earlier ones) and
    locals are bound to ``__magic_name__`` while the following statements read
    descriptive names (``config``, ``scheduler``, ``model`` ...).  Verify
    against the upstream scheduler test suite before relying on this copy.
    '''

    # Scheduler classes exercised by the shared SchedulerCommonTest machinery.
    A_ : Dict = (DDPMScheduler,)

    def __lowerCAmelCase ( self : Any , **_A : Dict ) -> str:
        # Baseline scheduler config; keyword overrides are merged on top.
        __magic_name__ : str = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'variance_type': 'fixed_small',
            'clip_sample': True,
        }
        config.update(**_A )
        return config

    def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
        # Config sweep over the number of training timesteps.
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=_A )

    def __lowerCAmelCase ( self : Optional[int] ) -> int:
        # Config sweep over paired (beta_start, beta_end) values.
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=_A , beta_end=_A )

    def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
        # Both supported beta schedules.
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=_A )

    def __lowerCAmelCase ( self : Tuple ) -> List[str]:
        # All variance types, including the "other" fallback branch.
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=_A )

    def __lowerCAmelCase ( self : Any ) -> Tuple:
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=_A )

    def __lowerCAmelCase ( self : Optional[int] ) -> str:
        # Dynamic thresholding across thresholds and prediction types.
        self.check_over_configs(thresholding=_A )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=_A , prediction_type=_A , sample_max_value=_A , )

    def __lowerCAmelCase ( self : Tuple ) -> List[str]:
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=_A )

    def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
        # Forward pass at the first, a middle, and the last timestep.
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=_A )

    def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
        # `_get_variance` against precomputed reference values.
        __magic_name__ : Union[str, Any] = self.scheduler_classes[0]
        __magic_name__ : Any = self.get_scheduler_config()
        __magic_name__ : Dict = scheduler_class(**_A )

        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5

    def __lowerCAmelCase ( self : Tuple ) -> int:
        # Full denoising loop (epsilon prediction) against reference statistics.
        __magic_name__ : Tuple = self.scheduler_classes[0]
        __magic_name__ : Union[str, Any] = self.get_scheduler_config()
        __magic_name__ : str = scheduler_class(**_A )
        __magic_name__ : Any = len(_A )

        __magic_name__ : Union[str, Any] = self.dummy_model()
        __magic_name__ : List[Any] = self.dummy_sample_deter
        __magic_name__ : Optional[Any] = torch.manual_seed(0 )

        for t in reversed(range(_A ) ):
            # 1. predict noise residual
            __magic_name__ : Tuple = model(_A , _A )

            # 2. predict previous mean of sample x_t-1
            __magic_name__ : Union[str, Any] = scheduler.step(_A , _A , _A , generator=_A ).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            __magic_name__ : Dict = pred_prev_sample

        __magic_name__ : Union[str, Any] = torch.sum(torch.abs(_A ) )
        __magic_name__ : Dict = torch.mean(torch.abs(_A ) )

        assert abs(result_sum.item() - 258.9606 ) < 1E-2
        assert abs(result_mean.item() - 0.3372 ) < 1E-3

    def __lowerCAmelCase ( self : Tuple ) -> Optional[int]:
        # Same denoising loop with the v-prediction parameterization.
        __magic_name__ : List[Any] = self.scheduler_classes[0]
        __magic_name__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' )
        __magic_name__ : Any = scheduler_class(**_A )
        __magic_name__ : Any = len(_A )

        __magic_name__ : Dict = self.dummy_model()
        __magic_name__ : str = self.dummy_sample_deter
        __magic_name__ : str = torch.manual_seed(0 )

        for t in reversed(range(_A ) ):
            # 1. predict noise residual
            __magic_name__ : List[Any] = model(_A , _A )

            # 2. predict previous mean of sample x_t-1
            __magic_name__ : Tuple = scheduler.step(_A , _A , _A , generator=_A ).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            __magic_name__ : List[Any] = pred_prev_sample

        __magic_name__ : int = torch.sum(torch.abs(_A ) )
        __magic_name__ : Any = torch.mean(torch.abs(_A ) )

        assert abs(result_sum.item() - 202.0296 ) < 1E-2
        assert abs(result_mean.item() - 0.2631 ) < 1E-3

    def __lowerCAmelCase ( self : List[str] ) -> str:
        # `previous_timestep` must chain through a custom descending schedule.
        __magic_name__ : Dict = self.scheduler_classes[0]
        __magic_name__ : Any = self.get_scheduler_config()
        __magic_name__ : Optional[Any] = scheduler_class(**_A )

        __magic_name__ : List[str] = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=_A )

        __magic_name__ : List[str] = scheduler.timesteps

        for i, timestep in enumerate(_A ):
            if i == len(_A ) - 1:
                # The last step has no predecessor; -1 is the sentinel.
                __magic_name__ : Optional[int] = -1
            else:
                __magic_name__ : List[Any] = timesteps[i + 1]

            __magic_name__ : Union[str, Any] = scheduler.previous_timestep(_A )
            __magic_name__ : Any = prev_t.item()

            self.assertEqual(_A , _A )

    def __lowerCAmelCase ( self : Tuple ) -> str:
        # Non-descending custom timesteps must be rejected.
        __magic_name__ : str = self.scheduler_classes[0]
        __magic_name__ : Union[str, Any] = self.get_scheduler_config()
        __magic_name__ : Union[str, Any] = scheduler_class(**_A )

        __magic_name__ : Optional[int] = [100, 87, 50, 51, 0]

        with self.assertRaises(_A , msg='`custom_timesteps` must be in descending order.' ):
            scheduler.set_timesteps(timesteps=_A )

    def __lowerCAmelCase ( self : Optional[int] ) -> int:
        # Passing both `num_inference_steps` and custom timesteps is an error.
        __magic_name__ : Union[str, Any] = self.scheduler_classes[0]
        __magic_name__ : Union[str, Any] = self.get_scheduler_config()
        __magic_name__ : Union[str, Any] = scheduler_class(**_A )

        __magic_name__ : Optional[int] = [100, 87, 50, 1, 0]
        __magic_name__ : Tuple = len(_A )

        with self.assertRaises(_A , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
            scheduler.set_timesteps(num_inference_steps=_A , timesteps=_A )

    def __lowerCAmelCase ( self : str ) -> Optional[Any]:
        # Timesteps at/after `num_train_timesteps` must be rejected.
        __magic_name__ : List[Any] = self.scheduler_classes[0]
        __magic_name__ : List[str] = self.get_scheduler_config()
        __magic_name__ : Union[str, Any] = scheduler_class(**_A )

        __magic_name__ : Tuple = [scheduler.config.num_train_timesteps]

        # NOTE(review): `msg` below is not an f-string, so the placeholder is
        # printed literally (and has a stray trailing brace) — confirm intent.
        with self.assertRaises(
            _A , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
            scheduler.set_timesteps(timesteps=_A )
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
    """Slow integration tests: XLM-RoBERTa last hidden states vs fairseq reference values.

    NOTE(review): this revision looks machine-rewritten — locals are bound to
    ``snake_case_`` but read back via ``_UpperCamelCase``, a name never defined
    in this scope.  Verify against the upstream integration tests.
    """

    @slow
    def a__ ( self :Dict ):
        # xlm-roberta-base: check output shape and a slice of the last dim.
        snake_case_ : Optional[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
        snake_case_ : Optional[int] = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
        # The dog is cute and lives in the garden house
        snake_case_ : Tuple = torch.Size((1, 1_2, 7_6_8) )  # batch_size, sequence_length, embedding_vector_dim
        snake_case_ : Dict = torch.tensor(
            [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
        # Reference values were produced with fairseq:
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            snake_case_ : Tuple = model(_UpperCamelCase )["""last_hidden_state"""].detach()
        self.assertEqual(output.shape ,_UpperCamelCase )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] ,_UpperCamelCase ,atol=1E-3 ) )

    @slow
    def a__ ( self :Union[str, Any] ):
        # xlm-roberta-large: same check with the 1024-dim model.
        snake_case_ : List[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" )
        snake_case_ : Dict = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
        # The dog is cute and lives in the garden house
        snake_case_ : List[Any] = torch.Size((1, 1_2, 1_0_2_4) )  # batch_size, sequence_length, embedding_vector_dim
        snake_case_ : Any = torch.tensor(
            [[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]] )
        # Reference values were produced with fairseq:
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            snake_case_ : str = model(_UpperCamelCase )["""last_hidden_state"""].detach()
        self.assertEqual(output.shape ,_UpperCamelCase )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] ,_UpperCamelCase ,atol=1E-3 ) )
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
__A : int = logging.getLogger()
def UpperCAmelCase ( ):
    """Return the value of the ``-f`` command-line argument.

    Absorbs the ``-f`` flag some test runners pass, so argparse does not choke
    on it.  (Fixed: the parser and parsed namespace were bound to
    ``snake_case_`` while the following statements read ``parser``/``args`` —
    a NameError at call time.)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("""-f""" )
    args = parser.parse_args()
    return args.f
def UpperCAmelCase ( lowerCamelCase_ :str ):
    """Load ``all_results.json`` from the directory *lowerCamelCase_* and return it as a dict.

    Raises:
        ValueError: if the results file does not exist.

    (Fixed: locals were bound to ``snake_case_`` while the following
    statements read ``results``/``path``/``f`` — a NameError at call time,
    and the existence check inspected the wrong value.)
    """
    results = {}
    path = os.path.join(lowerCamelCase_ , """all_results.json""" )
    if os.path.exists(path ):
        with open(path , """r""" ) as f:
            results = json.load(f )
    else:
        raise ValueError(F'''can\'t find {path}''' )
    return results
def UpperCAmelCase ( ):
    """Return True iff the test run targets CUDA *and* NVIDIA apex is importable.

    Used to decide whether the example scripts may be launched with ``--fp16``.
    (Fixed: the intermediate was bound to ``snake_case_`` while the return
    statement read ``is_using_cuda`` — a NameError at call time.)
    """
    is_using_cuda = torch.cuda.is_available() and torch_device == """cuda"""
    return is_using_cuda and is_apex_available()
# Mirror log records to stdout so the test harness captures script output.
# (Fixed: the handler was bound to `__A` while the next line read
# `stream_handler` — a NameError at import time.)
stream_handler = logging.StreamHandler(sys.stdout)
__A : Any = stream_handler  # preserve the original module-level binding
logger.addHandler(stream_handler)
class __UpperCamelCase ( lowercase__ ):
    """End-to-end tests for the PyTorch ``*_no_trainer`` example scripts,
    launched through ``accelerate launch`` in a temp working directory.

    NOTE(review): this revision looks machine-rewritten — every test method is
    named ``a__`` (kept distinct only by their decorators at class-build time,
    but later defs shadow earlier ones in the class namespace) and locals are
    bound to ``snake_case_`` while the following statements read descriptive
    names (``tmp_dir``, ``testargs``, ``result`` ...).  Verify against the
    upstream examples test suite.
    """

    @classmethod
    def a__ ( cls :Dict ):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        snake_case_ : Optional[int] = tempfile.mkdtemp()
        snake_case_ : Any = os.path.join(cls.tmpdir ,"""default_config.yml""" )
        write_basic_config(save_location=cls.configPath )
        snake_case_ : List[Any] = ["""accelerate""", """launch""", """--config_file""", cls.configPath]

    @classmethod
    def a__ ( cls :int ):
        # Class-level teardown: drop the shared accelerate config dir.
        shutil.rmtree(cls.tmpdir )

    @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
    def a__ ( self :Optional[int] ):
        # run_glue_no_trainer: text classification on the MRPC fixture.
        snake_case_ : List[Any] = self.get_auto_remove_tmp_dir()
        snake_case_ : List[str] = F'''
            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --seed=42
            --checkpointing_steps epoch
            --with_tracking
            '''.split()

        if is_cuda_and_apex_available():
            testargs.append("""--fp16""" )

        run_command(self._launch_args + testargs )
        snake_case_ : Dict = get_results(_UpperCamelCase )
        self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 )
        self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""glue_no_trainer""" ) ) )

    @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
    def a__ ( self :Tuple ):
        # run_clm_no_trainer: causal language modeling; perplexity bound.
        snake_case_ : str = self.get_auto_remove_tmp_dir()
        snake_case_ : Tuple = F'''
            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --checkpointing_steps epoch
            --with_tracking
            '''.split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        run_command(self._launch_args + testargs )
        snake_case_ : Optional[int] = get_results(_UpperCamelCase )
        self.assertLess(result["""perplexity"""] ,1_0_0 )
        self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""clm_no_trainer""" ) ) )

    @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
    def a__ ( self :Tuple ):
        # run_mlm_no_trainer: masked language modeling; perplexity bound.
        snake_case_ : List[Any] = self.get_auto_remove_tmp_dir()
        snake_case_ : List[str] = F'''
            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --num_train_epochs=1
            --checkpointing_steps epoch
            --with_tracking
            '''.split()

        run_command(self._launch_args + testargs )
        snake_case_ : str = get_results(_UpperCamelCase )
        self.assertLess(result["""perplexity"""] ,4_2 )
        self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""mlm_no_trainer""" ) ) )

    @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
    def a__ ( self :List[Any] ):
        # run_ner_no_trainer: token classification on the CoNLL fixture.
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        snake_case_ : Dict = 7 if get_gpu_count() > 1 else 2
        snake_case_ : str = self.get_auto_remove_tmp_dir()
        snake_case_ : str = F'''
            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            --checkpointing_steps epoch
            --with_tracking
            '''.split()

        run_command(self._launch_args + testargs )
        snake_case_ : Optional[int] = get_results(_UpperCamelCase )
        self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 )
        self.assertLess(result["""train_loss"""] ,0.5 )
        self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""ner_no_trainer""" ) ) )

    @unittest.skip(reason="""Fix me @muellerzr""" )
    @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
    def a__ ( self :List[str] ):
        # run_qa_no_trainer: SQuAD v2 question answering (currently skipped).
        snake_case_ : List[Any] = self.get_auto_remove_tmp_dir()
        snake_case_ : Optional[int] = F'''
            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --seed=42
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
            '''.split()

        run_command(self._launch_args + testargs )
        snake_case_ : str = get_results(_UpperCamelCase )
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["""eval_f1"""] ,2_8 )
        self.assertGreaterEqual(result["""eval_exact"""] ,2_8 )
        self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""qa_no_trainer""" ) ) )

    @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
    def a__ ( self :List[Any] ):
        # run_swag_no_trainer: multiple choice on the SWAG fixture.
        snake_case_ : str = self.get_auto_remove_tmp_dir()
        snake_case_ : Union[str, Any] = F'''
            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=20
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --with_tracking
            '''.split()

        run_command(self._launch_args + testargs )
        snake_case_ : Union[str, Any] = get_results(_UpperCamelCase )
        self.assertGreaterEqual(result["""eval_accuracy"""] ,0.8 )
        self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""swag_no_trainer""" ) ) )

    @slow
    @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
    def a__ ( self :int ):
        # run_summarization_no_trainer: ROUGE bounds on the XSum fixture.
        snake_case_ : List[Any] = self.get_auto_remove_tmp_dir()
        snake_case_ : List[Any] = F'''
            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
            '''.split()

        run_command(self._launch_args + testargs )
        snake_case_ : int = get_results(_UpperCamelCase )
        self.assertGreaterEqual(result["""eval_rouge1"""] ,1_0 )
        self.assertGreaterEqual(result["""eval_rouge2"""] ,2 )
        self.assertGreaterEqual(result["""eval_rougeL"""] ,7 )
        self.assertGreaterEqual(result["""eval_rougeLsum"""] ,7 )
        self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""summarization_no_trainer""" ) ) )

    @slow
    @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
    def a__ ( self :int ):
        # run_translation_no_trainer: BLEU bound on the WMT16 fixture.
        snake_case_ : Tuple = self.get_auto_remove_tmp_dir()
        snake_case_ : Optional[Any] = F'''
            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --num_beams=6
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --source_lang en_XX
            --target_lang ro_RO
            --checkpointing_steps epoch
            --with_tracking
            '''.split()

        run_command(self._launch_args + testargs )
        snake_case_ : Any = get_results(_UpperCamelCase )
        self.assertGreaterEqual(result["""eval_bleu"""] ,3_0 )
        self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""translation_no_trainer""" ) ) )

    @slow
    def a__ ( self :Optional[Any] ):
        # run_semantic_segmentation_no_trainer: overall-accuracy bound.
        snake_case_ : List[str] = logging.StreamHandler(sys.stdout )
        logger.addHandler(_UpperCamelCase )
        snake_case_ : Dict = self.get_auto_remove_tmp_dir()
        snake_case_ : Tuple = F'''
            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
            --dataset_name huggingface/semantic-segmentation-test-sample
            --output_dir {tmp_dir}
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            '''.split()

        run_command(self._launch_args + testargs )
        snake_case_ : str = get_results(_UpperCamelCase )
        self.assertGreaterEqual(result["""eval_overall_accuracy"""] ,0.10 )

    @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
    def a__ ( self :Any ):
        # run_image_classification_no_trainer: ViT on cats-vs-dogs fixture.
        snake_case_ : Dict = self.get_auto_remove_tmp_dir()
        snake_case_ : Tuple = F'''
            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
            --model_name_or_path google/vit-base-patch16-224-in21k
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --max_train_steps 2
            --train_val_split 0.1
            --seed 42
            --output_dir {tmp_dir}
            --with_tracking
            --checkpointing_steps 1
            '''.split()

        if is_cuda_and_apex_available():
            testargs.append("""--fp16""" )

        run_command(self._launch_args + testargs )
        snake_case_ : str = get_results(_UpperCamelCase )
        # The base model scores a 25%
        self.assertGreaterEqual(result["""eval_accuracy"""] ,0.6 )
        self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""step_1""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""image_classification_no_trainer""" ) ) )
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make kernel selection deterministic so the exact pixel values asserted in
# the pipeline tests below are reproducible across runs.
enable_full_determinism()
class A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Fast unit tests for `KandinskyVaaInpaintPipeline` with tiny components.

    NOTE(review): this revision looks machine-rewritten — every method is
    named ``_A`` (later defs shadow earlier ones), two defs declare duplicate
    parameter names (a SyntaxError), and locals are bound to ``__lowercase``
    while the following statements read descriptive names.  Verify against
    the upstream pipeline test suite.
    """

    # Pipeline class under test plus the arg lists the shared PipelineTesterMixin
    # machinery uses to build call signatures.
    UpperCamelCase_ : List[str] =KandinskyVaaInpaintPipeline
    UpperCamelCase_ : Optional[int] =["""image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
    UpperCamelCase_ : Union[str, Any] =[
        """image_embeds""",
        """negative_image_embeds""",
        """image""",
        """mask_image""",
    ]
    UpperCamelCase_ : Any =[
        """generator""",
        """height""",
        """width""",
        """latents""",
        """guidance_scale""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    UpperCamelCase_ : Optional[Any] =False

    @property
    def _A (self ):
        return 3_2

    @property
    def _A (self ):
        return 3_2

    @property
    def _A (self ):
        return self.time_input_dim

    @property
    def _A (self ):
        return self.time_input_dim * 4

    @property
    def _A (self ):
        return 1_0_0

    @property
    def _A (self ):
        # Tiny UNet: 9 input channels (4 latent + 4 masked-image latent + 1 mask).
        torch.manual_seed(0 )

        __lowercase= {
            'in_channels': 9,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }

        __lowercase= UNetaDConditionModel(**__UpperCamelCase )
        return model

    @property
    def _A (self ):
        # Kwargs for a tiny VQ decoder (movq).
        return {
            "block_out_channels": [3_2, 6_4],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 1_2,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def _A (self ):
        torch.manual_seed(0 )
        __lowercase= VQModel(**self.dummy_movq_kwargs )
        return model

    def _A (self ):
        # Assemble the pipeline components dict consumed by the tester mixin.
        __lowercase= self.dummy_unet
        __lowercase= self.dummy_movq

        __lowercase= DDIMScheduler(
            num_train_timesteps=1_0_0_0 , beta_schedule='linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=__UpperCamelCase , set_alpha_to_one=__UpperCamelCase , steps_offset=1 , prediction_type='epsilon' , thresholding=__UpperCamelCase , )

        __lowercase= {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }

        return components

    def _A (self , lowerCAmelCase , lowerCAmelCase=0 ):
        # Build deterministic dummy inputs (image, mask, embeds, generator).
        # NOTE(review): duplicate parameter names above are a SyntaxError in
        # this revision — confirm the intended (device, seed) signature.
        __lowercase= floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
        __lowercase= floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            __UpperCamelCase )
        # create init_image
        __lowercase= floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
        __lowercase= image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __lowercase= Image.fromarray(np.uinta(__UpperCamelCase ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
        # create mask
        __lowercase= np.ones((6_4, 6_4) , dtype=np.floataa )
        __lowercase= 0

        if str(__UpperCamelCase ).startswith('mps' ):
            __lowercase= torch.manual_seed(__UpperCamelCase )
        else:
            __lowercase= torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
        __lowercase= {
            'image': init_image,
            'mask_image': mask,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 6_4,
            'width': 6_4,
            'num_inference_steps': 2,
            'guidance_scale': 4.0,
            'output_type': 'np',
        }
        return inputs

    def _A (self ):
        # CPU smoke test: output shape and a fixed 3x3 slice of pixel values.
        __lowercase= 'cpu'

        __lowercase= self.get_dummy_components()

        __lowercase= self.pipeline_class(**__UpperCamelCase )
        __lowercase= pipe.to(__UpperCamelCase )

        pipe.set_progress_bar_config(disable=__UpperCamelCase )

        __lowercase= pipe(**self.get_dummy_inputs(__UpperCamelCase ) )
        __lowercase= output.images

        __lowercase= pipe(
            **self.get_dummy_inputs(__UpperCamelCase ) , return_dict=__UpperCamelCase , )[0]

        __lowercase= image[0, -3:, -3:, -1]
        __lowercase= image_from_tuple[0, -3:, -3:, -1]

        print(f'image.shape {image.shape}' )

        assert image.shape == (1, 6_4, 6_4, 3)

        __lowercase= np.array(
            [0.50_77_59_03, 0.49_52_71_95, 0.48_82_45_43, 0.50_19_22_37, 0.48_64_49_06, 0.49_37_38_14, 0.4_78_05_98, 0.47_23_48_27, 0.48_32_78_48] )

        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'

    def _A (self ):
        # Batched vs single inference must agree within a loose tolerance.
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
    """GPU integration test: Kandinsky 2.2 inpainting against a stored reference image.

    NOTE(review): both methods are named ``_A`` and locals are bound to
    ``__lowercase`` while read back under descriptive names — machine-rewritten
    revision; verify against the upstream test suite.
    """

    def _A (self ):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _A (self ):
        # Full fp16 prior + decoder inpaint run; compare mean pixel difference
        # against the stored "cat with hat" reference output.
        __lowercase= load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )

        __lowercase= load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        __lowercase= np.ones((7_6_8, 7_6_8) , dtype=np.floataa )
        __lowercase= 0

        __lowercase= 'a hat'

        __lowercase= KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
        pipe_prior.to(__UpperCamelCase )

        __lowercase= KandinskyVaaInpaintPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa )
        __lowercase= pipeline.to(__UpperCamelCase )
        pipeline.set_progress_bar_config(disable=__UpperCamelCase )

        __lowercase= torch.Generator(device='cpu' ).manual_seed(0 )
        __lowercase, __lowercase= pipe_prior(
            __UpperCamelCase , generator=__UpperCamelCase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()

        __lowercase= pipeline(
            image=__UpperCamelCase , mask_image=__UpperCamelCase , image_embeds=__UpperCamelCase , negative_image_embeds=__UpperCamelCase , generator=__UpperCamelCase , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type='np' , )

        __lowercase= output.images[0]

        assert image.shape == (7_6_8, 7_6_8, 3)

        assert_mean_pixel_difference(__UpperCamelCase , __UpperCamelCase )
| 295 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase_ ( unittest.TestCase ):
    """Builds small RegNet configs/inputs and runs shape checks for the Flax tests.

    Bug fixes vs the original (machine-mangled) version: ``__init__`` declared
    twelve parameters that all shared the name ``__UpperCamelCase`` (a
    SyntaxError), every attribute was assigned to a throwaway local instead of
    ``self``, and all five methods shared the name ``lowerCamelCase_`` so each
    shadowed the previous one.  Names below are restored from the attribute and
    method names that are actually read elsewhere in this file
    (``prepare_config_and_inputs``, ``num_stages``, ...).

    NOTE(review): the sibling test class constructs this helper as
    ``FlaxRegNetModelTester`` — the class name itself also looks mangled; it is
    kept as ``lowercase_`` here to avoid changing the visible interface.
    Confirm and rename consistently.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],  # mutable defaults kept for interface compatibility
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One "stage" per entry in hidden_sizes.
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        """Return a ``(config, pixel_values)`` pair sized from the tester fields."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        """Build a ``RegNetConfig`` from this tester's hyper-parameters."""
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        """Forward the base model and check the last-hidden-state shape."""
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w); the spatial dims are divided by 32.
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        """Forward the classification model and check the logits shape."""
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Adapt ``prepare_config_and_inputs`` to the ``(config, inputs_dict)`` form."""
        config, pixel_values = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class lowercase_ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Common + RegNet-specific tests for the Flax RegNet models.

    NOTE(review): this class appears machine-mangled and cannot run as written:
    the base class ``__SCREAMING_SNAKE_CASE`` is undefined in this file
    (presumably the ``FlaxModelTesterMixin`` imported above — confirm), the four
    class attributes all share the name ``A__`` (each assignment shadows the
    previous one), every method shares the name ``lowerCamelCase_`` (only the
    last definition survives), and the bodies assign results to the throwaway
    name ``UpperCamelCase_`` while later reading names that were never bound
    (``__UpperCamelCase``, ``self.model_tester``, ``self.config_tester``, ...).
    The typing names in the attribute annotations (``Tuple``, ``Any``, ...) are
    also never imported.  The intended distinct names must be restored; the
    docstrings below record each member's apparent intent.
    """

    # Intended: all_model_classes plus three boolean feature flags — the flag
    # names are mangled away; confirm against the upstream test file.
    A__ : Tuple = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    A__ : Any = False
    A__ : List[Any] = False
    A__ : Dict = False

    def lowerCamelCase_ ( self ):
        """Intended setUp: create the model tester and the config tester."""
        # NOTE(review): results go to a throwaway local instead of
        # ``self.model_tester`` / ``self.config_tester``; ``FlaxRegNetModelTester``
        # and ``__UpperCamelCase`` are undefined here — verify.
        UpperCamelCase_ = FlaxRegNetModelTester(self )
        UpperCamelCase_ = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase )

    def lowerCamelCase_ ( self ):
        """Run the standard configuration round-trip/serialization checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def lowerCamelCase_ ( self ):
        """No-op placeholder (original intent mangled away — confirm upstream)."""
        return

    def lowerCamelCase_ ( self ):
        """Build config/inputs and run the base-model shape check."""
        UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__UpperCamelCase )

    def lowerCamelCase_ ( self ):
        """Build config/inputs and run the image-classification head check."""
        UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )

    @unittest.skip(reason="""RegNet does not use inputs_embeds""" )
    def lowerCamelCase_ ( self ):
        """Skipped: vision model, no inputs_embeds."""
        pass

    @unittest.skip(reason="""RegNet does not support input and output embeddings""" )
    def lowerCamelCase_ ( self ):
        """Skipped: no input/output embedding layers."""
        pass

    def lowerCamelCase_ ( self ):
        """Check each model's forward signature starts with ``pixel_values``."""
        UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase_ = model_class(__UpperCamelCase )
            UpperCamelCase_ = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCamelCase_ = [*signature.parameters.keys()]
            UpperCamelCase_ = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , __UpperCamelCase )

    def lowerCamelCase_ ( self ):
        """Check hidden-states output: one entry per stage plus one extra."""
        def check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
            # NOTE(review): three parameters share one name — a SyntaxError as
            # written; intended (inputs_dict, config, model_class) — confirm.
            UpperCamelCase_ = model_class(__UpperCamelCase )
            UpperCamelCase_ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
            UpperCamelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            UpperCamelCase_ = self.model_tester.num_stages
            self.assertEqual(len(__UpperCamelCase ) , expected_num_stages + 1 )

        UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase_ = True
            check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            UpperCamelCase_ = True
            check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )

    def lowerCamelCase_ ( self ):
        """Check that jitted and non-jitted forward passes agree in shape."""
        UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCamelCase_ = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
                UpperCamelCase_ = model_class(__UpperCamelCase )

                @jax.jit
                def model_jitted(__UpperCamelCase , **__UpperCamelCase ):
                    # NOTE(review): positional arg and **kwargs share one name —
                    # a SyntaxError as written; intended (pixel_values, **kwargs).
                    return model(pixel_values=__UpperCamelCase , **__UpperCamelCase )

                with self.subTest("""JIT Enabled""" ):
                    UpperCamelCase_ = model_jitted(**__UpperCamelCase ).to_tuple()

                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        UpperCamelCase_ = model_jitted(**__UpperCamelCase ).to_tuple()

                self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
                for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
                    self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( ):
    """Load the local COCO fixture image used by the slow integration test.

    Bug fix: the original assigned the opened image to a mangled throwaway
    local and then returned the undefined name ``image`` (NameError); the
    unimported ``Tuple`` return annotation was dropped as well.

    NOTE(review): the integration test below calls this helper as
    ``prepare_img`` — the function name itself also looks mangled; confirm and
    rename consistently.
    """
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
@require_flax
class lowercase_ ( unittest.TestCase ):
    """Slow integration test running the pretrained facebook/regnet-y-040 checkpoint.

    NOTE(review): mangled names — both methods share the name ``lowerCamelCase_``
    (the second shadows the first), the bodies assign to the throwaway
    ``UpperCamelCase_`` but read ``self.default_image_processor``, ``prepare_img``,
    ``image_processor``, ``model`` and ``outputs``, none of which are bound under
    those names here.  Restore the intended names (``default_image_processor``
    property and a distinct test-method name) before running.
    """

    @cached_property
    def lowerCamelCase_ ( self ):
        """Intended ``default_image_processor``: checkpoint processor, or None without vision deps."""
        return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None

    @slow
    def lowerCamelCase_ ( self ):
        """Forward the COCO fixture image and verify logits shape and first values."""
        UpperCamelCase_ = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" )
        UpperCamelCase_ = self.default_image_processor
        UpperCamelCase_ = prepare_img()
        UpperCamelCase_ = image_processor(images=__UpperCamelCase , return_tensors="""np""" )
        UpperCamelCase_ = model(**__UpperCamelCase )
        # verify the logits
        UpperCamelCase_ = (1, 1_0_0_0)
        self.assertEqual(outputs.logits.shape , __UpperCamelCase )
        UpperCamelCase_ = jnp.array([-0.4_180, -1.5_051, -3.4_836] )
        self.assertTrue(jnp.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
| 122 | 0 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
# NaN sentinel used to mark failed runs / missing metric values.
# NOTE(review): later functions in this file refer to this constant as ``nan``;
# the mangled name here no longer matches — restore ``nan = float("nan")``.
# (The original ``Optional[Any]`` annotation referenced an unimported name and
# was removed.)
lowerCamelCase_ = float("""nan""")
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , snake_case_ : Union[str, Any] ):
UpperCamelCase_: Union[str, Any] = sys.stdout
UpperCamelCase_: Tuple = open(snake_case_ , """a""" )
def __getattr__( self : Optional[int] , snake_case_ : List[Any] ):
return getattr(self.stdout , snake_case_ )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : List[str] ):
self.stdout.write(snake_case_ )
# strip tqdm codes
self.file.write(re.sub(R"""^.*\r""" , """""" , snake_case_ , 0 , re.M ) )
def A__ ( max_width=80 , full_python_path=False ) -> str:
    """Reconstruct the command line this script was launched with.

    The command is rebuilt from critical env vars (CUDA_VISIBLE_DEVICES), the
    python executable and ``sys.argv``, then wrapped with shell ``\\``
    line-continuations so no line exceeds ``max_width`` characters.

    Bug fixes vs the mangled original: both parameters shared one name (a
    SyntaxError) and every local was assigned to the same throwaway name while
    the body read ``cmd``/``lines``/``current_line``; the ``-> int`` annotation
    was wrong (a string is returned).
    """
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)
    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def A__ ( args , output_dir ) -> list:
    """Build the benchmark's base command list from ``args.base_cmd``.

    Unwraps multi-line input, forces ``--output_dir`` to ``output_dir`` and
    guarantees ``--overwrite_output_dir`` appears exactly once.  ``args`` is
    mutated: ``args.base_cmd`` holds the normalized string afterwards.

    Bug fixes vs the mangled original: the two parameters shared one name (a
    SyntaxError) and the intermediate rewrites were assigned to throwaway
    locals instead of back to ``args.base_cmd``, so none took effect; the
    regexes are now raw strings (``\\s`` in a plain literal is an invalid
    escape).
    """
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd)
def A__ ( id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose ) -> dict:
    """Run one benchmark subprocess and return the requested metrics.

    The run's stdout/stderr are saved under ``output_dir`` as
    ``log.<variation>.stdout.txt`` / ``log.<variation>.stderr.txt``.  On a
    non-zero exit code a dict mapping ``target_metric_key`` to NaN is returned;
    otherwise the metrics listed in ``metric_keys`` are read back from
    ``output_dir/all_results.json``.

    Bug fixes vs the mangled original: the seven parameters all shared one
    name (a SyntaxError), the failure path returned the undefined name ``nan``
    (now ``math.nan``), and a dead ``if 0:`` debug block was removed.
    """
    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: math.nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def A__ ( id , cmd , variation_key , variation , longest_variation_len , target_metric_key , report_metric_keys , repeat_times , output_dir , verbose , ) -> dict:
    """Run one variation ``repeat_times`` times and return its averaged metrics.

    Each successful repeat contributes to a per-key mean (``fmean``); failed
    repeats (NaN target metric) are skipped and marked ``✘`` in the progress
    line.  If every repeat failed, a dict with the variation and a NaN target
    metric is returned instead.

    Bug fixes vs the mangled original: the ten parameters all shared one name
    (a SyntaxError), the per-run tuple rounded a mangled invariant instead of
    each result (``round(x, 2)``), and the failure path referenced the
    undefined name ``nan`` (now ``math.nan``).

    NOTE(review): ``process_run_single`` is the intended name of the sibling
    single-run helper in this file, whose own definition was also mangled —
    restore that name consistently.
    """
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    # \33[2K erases the tqdm leftovers on the current console line.
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: math.nan}
def A__ ( ) -> str:
    """Return a human-readable software/hardware report for the benchmark log.

    Bug fixes vs the mangled original: the CUDA device properties were stored
    in a throwaway local while the f-string below reads ``properties``, and the
    ``-> Dict`` annotation referenced an unimported name (a str is returned).

    NOTE(review): requires a CUDA device — ``torch.cuda.get_device_properties``
    raises otherwise.
    """
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def A__ ( results , target_metric_key , report_metric_keys , base_variation , output_dir ) -> None:
    """Render the benchmark results twice: github-markdown and console format.

    ``base_variation`` (when present in the table) supplies the 100% baseline
    for the ``diff_%`` column; otherwise the minimum target metric is used.

    Bug fixes vs the mangled original: the five parameters all shared one name
    (a SyntaxError); the sentinel, the diff column, the reordered/renamed
    frames and the lambda argument were all assigned to throwaway locals while
    the body read the real names; ``nan`` and ``index`` were undefined (now
    ``math.nan`` / ``index=False``).

    NOTE(review): ``get_versions`` and ``get_original_command`` are the
    intended names of mangled siblings in this file — restore them
    consistently.
    """
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = math.nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        # (NaN != NaN, so this comparison keeps every row — upstream quirk, kept)
        sentinel_value = df.loc[df[target_metric_key] != math.nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def A__ ( ) -> None:
    """Entry point: parse CLI args, run every variation, and print the report.

    Bug fixes vs the mangled original: argparse ``default``/``type``/``required``
    values were replaced by an undefined name (restored to their documented
    values), the dimension split applied ``re.split`` to an undefined variable
    instead of the loop item, the ``Tee`` was created but never installed as
    ``sys.stdout``, and all intermediate results went to one throwaway local.

    NOTE(review): ``get_base_command``, ``process_run``, ``process_results`` and
    ``Tee`` are the intended names of mangled siblings in this file — restore
    those names consistently.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd", default=None, type=str, required=True, help="Base cmd",
    )
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation", default=None, type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys", default="", type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples",
    )
    parser.add_argument(
        "--repeat-times", default=1, type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose", default=False, action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")
    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
    # Script entry point.
    # NOTE(review): ``main`` is the intended entry-point function, but the
    # definition above was renamed by the mangling — restore the name ``main``.
    main()
| 369 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
# NOTE(review): both assignments below share one mangled name, so the archive
# map immediately shadows the logger — restore distinct names (e.g. ``logger``
# and ``CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP``).  The original ``Optional[int]``
# annotations referenced an unimported name and were removed.
lowerCamelCase_ = logging.get_logger(__name__)
# Map from model id to the hosted location of its pretrained config.
lowerCamelCase_ = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""}
class _UpperCamelCase ( _A ):
    """Configuration class for the CTRL model.

    Stores the hyper-parameters defining a CTRL architecture; all arguments are
    optional and default to the values of the original ``ctrl`` checkpoint.
    Extra keyword arguments are forwarded to the base configuration class.

    Bug fixes vs the mangled original: every ``__init__`` parameter shared the
    one name ``snake_case_`` (a SyntaxError) and each value was assigned to a
    throwaway local instead of an instance attribute; the three class
    attributes all shared the name ``__UpperCamelCase`` (each shadowing the
    previous) and carried annotations referencing unimported typing names.
    Names were restored from the attribute names the body reads and from the
    ``attribute_map`` contents.

    NOTE(review): the base class ``_A`` is also a mangled name — presumably the
    ``PretrainedConfig`` imported above; confirm.
    """

    model_type = """ctrl"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__(
        self,
        vocab_size=24_6534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
| 223 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.