import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy
                # transforms it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms
                # it into a space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
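
# A worked note on the toy vocabulary above (added commentary, not part of the
# upstream test): "lower" starts as l o w e "r</w>"; the merge "e r</w>" yields
# "er</w>" and "l o" yields "lo", while "lo w</w>" never fires because this "w"
# is not word-final, leaving ["lo", "w", "er</w>"] -- exactly the token sequence
# test_full_tokenizer asserts, with ids [10, 2, 16] from vocab_tokens.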
# ---------------------------------------------------------------------------
def remove_duplicates(key: str) -> str:
    """Remove duplicate alphabetic characters from a keyword."""
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """Return a cipher map given a keyword."""
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Encipher a message using a cipher map."""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Decipher a message by reversing the cipher map."""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    """Handle user input and dispatch to encipher/decipher."""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
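
# Example round trip (a sketch computed from the functions above):
#
#   >>> cipher_map = create_cipher_map("Goodbye!!")
#   >>> encipher("Hello World!!", cipher_map)
#   'CYJJM VMQJB!!'
#   >>> decipher("CYJJM VMQJB!!", cipher_map)
#   'HELLO WORLD!!'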
# ---------------------------------------------------------------------------
"""
Generic utilities: framework-agnostic tensor helpers and the `ModelOutput` base class.
"""

import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy


if is_flax_available():
    import jax.numpy as jnp


class cached_property(property):
    """
    Descriptor that mimics @property but caches output in a member variable.
    """

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached


def strtobool(val):
    """Convert a string representation of truth to True (1) or False (0)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")


def is_tensor(x):
    """Test whether `x` is a torch, TensorFlow or Jax tensor or a NumPy array."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Tests if `x` is a numpy array or not."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Tests if `x` is a torch tensor or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """Tests if `x` is a torch device or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """Tests if `x` is a torch dtype or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """Tests if `x` is a tensorflow tensor or not. Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """Tests if `x` is a tensorflow symbolic tensor (i.e. not eager). Safe to call without tensorflow installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """Tests if `x` is a Jax tensor or not. Safe to call even if jax is not installed."""
    return False if not is_flax_available() else _is_jax(x)


def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, NumPy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, NumPy array or python list to a NumPy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj


class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice
    (like a tuple) or strings (like a dictionary) that will ignore the `None` attributes.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyError if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        """
        Convert self to a tuple containing all the attributes/keys that are not `None`.
        """
        return tuple(self[k] for k in self.keys())


class ExplicitEnum(str, Enum):
    """
    Enum with a more explicit error message for missing values.
    """

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    """
    Possible values for the `padding` argument in [`PreTrainedTokenizerBase.__call__`].
    """

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    """
    Possible values for the `return_tensors` argument in [`PreTrainedTokenizerBase.__call__`].
    """

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    """
    Wrapper for `contextlib.ExitStack` which enters a collection of context managers.
    """

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)


def can_return_loss(model_class):
    """
    Check if a given model can return loss, by inspecting its `call`/`forward`/`__call__` signature.
    """
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """
    Find the label arguments used by a given model, by inspecting its signature.
    """
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]


def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))


@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir


def transpose(array, axes=None):
    """
    Framework-agnostic version of `numpy.transpose` that works on torch/TensorFlow/Jax tensors and NumPy arrays.
    """
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """
    Framework-agnostic version of `numpy.reshape` that works on torch/TensorFlow/Jax tensors and NumPy arrays.
    """
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """
    Framework-agnostic version of `numpy.squeeze` that works on torch/TensorFlow/Jax tensors and NumPy arrays.
    """
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """
    Framework-agnostic version of `numpy.expand_dims` that works on torch/TensorFlow/Jax tensors and NumPy arrays.
    """
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """
    Framework-agnostic version of `numpy.size` that works on torch/TensorFlow/Jax tensors and NumPy arrays.
    """
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")


def add_model_info_to_auto_map(auto_map, repo_id):
    """
    Adds the information of the repo_id to a given auto map.
    """
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """
    Infers the framework of a given model without using isinstance(), because we cannot guarantee that the relevant
    classes are imported or available.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
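
# A minimal demonstration of the ModelOutput behaviour defined above; the
# SampleOutput dataclass is illustrative and not part of this module.
if __name__ == "__main__":
    from dataclasses import dataclass

    @dataclass
    class SampleOutput(ModelOutput):
        loss: Any = None
        logits: Any = None

    demo = SampleOutput(logits=np.ones((2, 2)))
    assert demo["logits"] is demo.logits       # dict-style and attribute access agree
    assert "loss" not in demo                  # None fields are dropped from the mapping
    assert demo.to_tuple()[0] is demo.logits   # ...and skipped by to_tuple()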
# ---------------------------------------------------------------------------
"""
Pure Python implementation of the P-Series:
1 + 1/2^p + 1/3^p + ... + 1/n^p
"""
from __future__ import annotations


def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """
    Return the P-Series from the first to the last (nth) term as strings.
    """
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
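
# Doctest-style example (power 2, five terms):
#
#   >>> p_series(5, 2)
#   ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']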
# ---------------------------------------------------------------------------
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """
    Interpolate and evaluate a polynomial using Neville's method.

    Arguments:
        x_points, y_points: Iterables of x and corresponding y points through
            which the polynomial passes.
        x0: The value of x to evaluate the polynomial for.
    Return Value: A list of the approximated value and the Neville iterations
        table respectively.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
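
# Example (the five points lie on the line y = x + 5, so interpolating at
# x0 = 5 returns 10.0):
#
#   >>> neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0]
#   10.0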
# ---------------------------------------------------------------------------
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
UpperCAmelCase : Union[str, Any] = re.compile(r"\b(a|an|the)\b", re.UNICODE)
UpperCAmelCase : Optional[Any] = None
def _SCREAMING_SNAKE_CASE () -> List[Any]:
'''simple docstring'''
lowercase_ = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" )
parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" )
parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" )
parser.add_argument(
"""--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" )
parser.add_argument(
"""--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" )
parser.add_argument(
"""--na-prob-thresh""" , """-t""" , type=__lowerCAmelCase , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , )
parser.add_argument(
"""--out-image-dir""" , """-p""" , metavar="""out_images""" , default=__lowerCAmelCase , help="""Save precision-recall curves to directory.""" )
parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowercase_ = bool(qa["""answers"""]["""text"""] )
return qid_to_has_ans
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
def remove_articles(__lowerCAmelCase ):
return ARTICLES_REGEX.sub(""" """ , __lowerCAmelCase )
def white_space_fix(__lowerCAmelCase ):
return " ".join(text.split() )
def remove_punc(__lowerCAmelCase ):
lowercase_ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowerCAmelCase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowerCAmelCase ) ) ) )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
if not s:
return []
return normalize_answer(__lowerCAmelCase ).split()
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
return int(normalize_answer(__lowerCAmelCase ) == normalize_answer(__lowerCAmelCase ) )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase_ = get_tokens(__lowerCAmelCase )
lowercase_ = get_tokens(__lowerCAmelCase )
lowercase_ = collections.Counter(__lowerCAmelCase ) & collections.Counter(__lowerCAmelCase )
lowercase_ = sum(common.values() )
if len(__lowerCAmelCase ) == 0 or len(__lowerCAmelCase ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
lowercase_ = 1.0 * num_same / len(__lowerCAmelCase )
lowercase_ = 1.0 * num_same / len(__lowerCAmelCase )
lowercase_ = (2 * precision * recall) / (precision + recall)
return fa
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Any:
'''simple docstring'''
lowercase_ = {}
lowercase_ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowercase_ = qa["""id"""]
lowercase_ = [t for t in qa["""answers"""]["""text"""] if normalize_answer(__lowerCAmelCase )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
lowercase_ = [""""""]
if qid not in preds:
print(F'''Missing prediction for {qid}''' )
continue
lowercase_ = preds[qid]
# Take max over all gold answers
lowercase_ = max(compute_exact(__lowerCAmelCase , __lowerCAmelCase ) for a in gold_answers )
lowercase_ = max(compute_fa(__lowerCAmelCase , __lowerCAmelCase ) for a in gold_answers )
return exact_scores, fa_scores
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
'''simple docstring'''
lowercase_ = {}
for qid, s in scores.items():
lowercase_ = na_probs[qid] > na_prob_thresh
if pred_na:
lowercase_ = float(not qid_to_has_ans[qid] )
else:
lowercase_ = s
return new_scores
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ) -> List[str]:
'''simple docstring'''
if not qid_list:
lowercase_ = len(__lowerCAmelCase )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores.values() ) / total),
("""f1""", 100.0 * sum(fa_scores.values() ) / total),
("""total""", total),
] )
else:
lowercase_ = len(__lowerCAmelCase )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("""f1""", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("""total""", total),
] )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
'''simple docstring'''
for k in new_eval:
lowercase_ = new_eval[k]
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
'''simple docstring'''
plt.step(__lowerCAmelCase , __lowerCAmelCase , color="""b""" , alpha=0.2 , where="""post""" )
plt.fill_between(__lowerCAmelCase , __lowerCAmelCase , step="""post""" , alpha=0.2 , color="""b""" )
plt.xlabel("""Recall""" )
plt.ylabel("""Precision""" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(__lowerCAmelCase )
plt.savefig(__lowerCAmelCase )
plt.clf()
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None ) -> List[Any]:
'''simple docstring'''
lowercase_ = sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : na_probs[k] )
lowercase_ = 0.0
lowercase_ = 1.0
lowercase_ = 0.0
lowercase_ = [1.0]
lowercase_ = [0.0]
lowercase_ = 0.0
for i, qid in enumerate(__lowerCAmelCase ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
lowercase_ = true_pos / float(i + 1 )
lowercase_ = true_pos / float(__lowerCAmelCase )
if i == len(__lowerCAmelCase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(__lowerCAmelCase )
recalls.append(__lowerCAmelCase )
if out_image:
plot_pr_curve(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return {"ap": 100.0 * avg_prec}
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
'''simple docstring'''
if out_image_dir and not os.path.exists(__lowerCAmelCase ):
os.makedirs(__lowerCAmelCase )
lowercase_ = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
lowercase_ = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , )
lowercase_ = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , )
lowercase_ = {k: float(__lowerCAmelCase ) for k, v in qid_to_has_ans.items()}
lowercase_ = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" , )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , """pr_exact""" )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , """pr_f1""" )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , """pr_oracle""" )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
'''simple docstring'''
if not qid_list:
return
lowercase_ = [na_probs[k] for k in qid_list]
lowercase_ = np.ones_like(__lowerCAmelCase ) / float(len(__lowerCAmelCase ) )
plt.hist(__lowerCAmelCase , weights=__lowerCAmelCase , bins=20 , range=(0.0, 1.0) )
plt.xlabel("""Model probability of no-answer""" )
plt.ylabel("""Proportion of dataset""" )
plt.title(F'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(__lowerCAmelCase , F'''na_prob_hist_{name}.png''' ) )
plt.clf()
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
lowercase_ = num_no_ans
lowercase_ = cur_score
lowercase_ = 0.0
lowercase_ = sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : na_probs[k] )
for i, qid in enumerate(__lowerCAmelCase ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
lowercase_ = scores[qid]
else:
if preds[qid]:
lowercase_ = -1
else:
lowercase_ = 0
cur_score += diff
if cur_score > best_score:
lowercase_ = cur_score
lowercase_ = na_probs[qid]
return 100.0 * best_score / len(__lowerCAmelCase ), best_thresh
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ , lowercase_ = find_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowercase_ , lowercase_ = find_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowercase_ = best_exact
lowercase_ = exact_thresh
lowercase_ = best_fa
lowercase_ = fa_thresh
def _SCREAMING_SNAKE_CASE () -> int:
'''simple docstring'''
with open(OPTS.data_file ) as f:
lowercase_ = json.load(__lowerCAmelCase )
lowercase_ = dataset_json["""data"""]
with open(OPTS.pred_file ) as f:
lowercase_ = json.load(__lowerCAmelCase )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
lowercase_ = json.load(__lowerCAmelCase )
else:
lowercase_ = {k: 0.0 for k in preds}
lowercase_ = make_qid_to_has_ans(__lowerCAmelCase ) # maps qid to True/False
lowercase_ = [k for k, v in qid_to_has_ans.items() if v]
lowercase_ = [k for k, v in qid_to_has_ans.items() if not v]
lowercase_ , lowercase_ = get_raw_scores(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = apply_no_ans_threshold(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.na_prob_thresh )
lowercase_ = apply_no_ans_threshold(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.na_prob_thresh )
lowercase_ = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase )
if has_ans_qids:
lowercase_ = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase , qid_list=__lowerCAmelCase )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , """HasAns""" )
if no_ans_qids:
lowercase_ = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase , qid_list=__lowerCAmelCase )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , """NoAns""" )
if OPTS.na_prob_file:
find_all_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir )
histogram_na_prob(__lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir , """hasAns""" )
histogram_na_prob(__lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir , """noAns""" )
if OPTS.out_file:
with open(OPTS.out_file , """w""" ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
else:
print(json.dumps(__lowerCAmelCase , indent=2 ) )
if __name__ == "__main__":
UpperCAmelCase : Union[str, Any] = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
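
# Typical invocation (a sketch; the script and file names are placeholders):
#
#   python evaluate_squad_v2.py dev-v2.0.json predictions.json \
#       --na-prob-file na_prob.json --out-file eval.json --out-image-dir out_images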
# ---------------------------------------------------------------------------
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
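
# With the lazy structure above, importing the package stays cheap; the
# torch-backed submodule is only loaded on first attribute access (a sketch):
#
#   from transformers.models.table_transformer import TableTransformerConfig
#   config = TableTransformerConfig()  # resolves configuration_table_transformer lazily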
# ---------------------------------------------------------------------------
"""
Processor class for LayoutXLM.
"""
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutXLMProcessor(ProcessorMixin):
    r"""
    Constructs a LayoutXLM processor which combines a LayoutLMv2 image processor and a LayoutXLM tokenizer into a
    single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify the inputs
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding,
            truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose,
            return_tensors=return_tensors, **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
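
# A usage sketch (checkpoint name and inputs are illustrative; words/boxes are
# only accepted when the image processor was created with apply_ocr=False):
#
#   processor = LayoutXLMProcessor.from_pretrained(
#       "microsoft/layoutxlm-base", apply_ocr=False
#   )
#   encoding = processor(image, words, boxes=boxes, return_tensors="pt")
#   # encoding holds input_ids, bbox, attention_mask and image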
# ---------------------------------------------------------------------------
"""
Parse the raw DPR NQ dev data into an evaluation set of questions and a
gold-data file of positive-context page titles.
"""
import argparse
import json

from tqdm import tqdm


def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
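
# Example invocation (a sketch; the output paths are placeholders):
#
#   python parse_dpr_relevance_data.py \
#       --src_path biencoder-nq-dev.json \
#       --evaluation_set nq_dev.questions \
#       --gold_data_path nq_dev.gold_data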
# ---------------------------------------------------------------------------
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    """
    Configuration class to store the configuration of an Open-Llama model.
    """

    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled "use_memorry_efficient_attention" key is kept for backward compatibility
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
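
# A short sketch of the validation above (values illustrative):
#
#   OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes
#   OpenLlamaConfig(rope_scaling={"type": "cubic", "factor": 2.0})   # ValueError
#   OpenLlamaConfig(rope_scaling={"factor": 0.5})                    # ValueError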
# ---------------------------------------------------------------------------
from unittest import TestCase

from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
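
# Why these assertions hold: the first two files repeat the same "a " token 20
# and 30 times, so their MinHash-estimated Jaccard similarity clears the 0.85
# threshold and they form one two-element cluster; the "b " file stays out,
# leaving two rows after deduplication.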
# ---------------------------------------------------------------------------
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count the number of set bits in a non-negative integer."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1  # clears the least significant set bit
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count the number of set bits in a non-negative integer."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """
    Benchmark code comparing the two functions on int values of different lengths.
    Brian Kernighan's algorithm is consistently faster than using the modulo operator.
    """

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
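
# Quick self-checks (computed by hand: 25 = 0b11001 and 37 = 0b100101 each
# have three set bits):
#
#   assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3
#   assert get_set_bits_count_using_modulo_operator(37) == 3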
# ---------------------------------------------------------------------------
"""
Processor class for ViLT.
"""
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class ViltProcessor(ProcessorMixin):
    r"""
    Constructs a ViLT processor which wraps a BERT tokenizer and a ViLT image processor into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """
        Uses the tokenizer to prepare the text and the image processor to prepare the image(s).
        """
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose,
            return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
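
# A usage sketch for visual question answering (checkpoint name illustrative):
#
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   encoding = processor(image, "How many cats are there?", return_tensors="pt")
#   # encoding combines input_ids/attention_mask with pixel_values + pixel_mask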
# ---------------------------------------------------------------------------
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
SCREAMING_SNAKE_CASE :Optional[int] = NewType('DataClass', Any)
SCREAMING_SNAKE_CASE :int = NewType('DataClassType', Any)
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
if isinstance(a_ , a_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' )
def UpperCAmelCase ( a_ ) -> Callable[[str], Any]:
"""simple docstring"""
__A = {str(a_ ): choice for choice in choices}
return lambda a_ : str_to_choice.get(a_ , a_ )
def UpperCAmelCase ( *,
a_ = None , a_ = None , a_ = dataclasses.MISSING , a_ = dataclasses.MISSING , a_ = None , **a_ , ) -> dataclasses.Field:
"""simple docstring"""
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
__A = {}
if aliases is not None:
__A = aliases
if help is not None:
__A = help
return dataclasses.field(metadata=a_ , default=a_ , default_factory=a_ , **a_ )
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = 42
def __init__( self : Union[str, Any] ,A : Union[DataClassType, Iterable[DataClassType]] ,**A : List[Any] ):
# To make the default appear when using --help
if "formatter_class" not in kwargs:
__A = ArgumentDefaultsHelpFormatter
super().__init__(**A )
if dataclasses.is_dataclass(A ):
__A = [dataclass_types]
__A = list(A )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(A )
@staticmethod
def UpperCamelCase_ ( A : ArgumentParser ,A : dataclasses.Field ):
__A = f'''--{field.name}'''
__A = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type ,A ):
raise RuntimeError(
"Unresolved type detected, which should have been done with the help of "
"`typing.get_type_hints` method by default" )
__A = kwargs.pop("aliases" ,[] )
if isinstance(A ,A ):
__A = [aliases]
__A = getattr(field.type ,"__origin__" ,field.type )
if origin_type is Union or (hasattr(A ,"UnionType" ) and isinstance(A ,types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(A ) not in field.type.__args__
):
raise ValueError(
"Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
" the argument parser only supports one type per argument."
f''' Problem encountered in field \'{field.name}\'.''' )
if type(A ) not in field.type.__args__:
# filter `str` in Union
__A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
__A = getattr(field.type ,"__origin__" ,field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
__A = (
field.type.__args__[0] if isinstance(A ,field.type.__args__[1] ) else field.type.__args__[1]
)
__A = getattr(field.type ,"__origin__" ,field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
__A = {}
if origin_type is Literal or (isinstance(field.type ,A ) and issubclass(field.type ,A )):
if origin_type is Literal:
__A = field.type.__args__
else:
__A = [x.value for x in field.type]
__A = make_choice_type_function(kwargs["choices"] )
if field.default is not dataclasses.MISSING:
__A = field.default
else:
__A = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
__A = copy(A )
# Hack because type=bool in argparse does not behave as we want.
__A = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
__A = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
__A = default
# This tells argparse we accept 0 or 1 value after --field_name
__A = "?"
# This is the value that will get picked if we do --field_name (without value)
__A = True
elif isclass(A ) and issubclass(A ,A ):
__A = field.type.__args__[0]
__A = "+"
if field.default_factory is not dataclasses.MISSING:
__A = field.default_factory()
elif field.default is dataclasses.MISSING:
__A = True
else:
__A = field.type
if field.default is not dataclasses.MISSING:
__A = field.default
elif field.default_factory is not dataclasses.MISSING:
__A = field.default_factory()
else:
__A = True
parser.add_argument(A ,*A ,**A )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
__A = False
parser.add_argument(f'''--no_{field.name}''' ,action="store_false" ,dest=field.name ,**A )
def UpperCamelCase_ ( self : Union[str, Any] ,A : DataClassType ):
if hasattr(A ,"_argument_group_name" ):
__A = self.add_argument_group(dtype._argument_group_name )
else:
__A = self
try:
__A = get_type_hints(A )
except NameError:
raise RuntimeError(
f'''Type resolution failed for {dtype}. Try declaring the class in global scope or '''
"removing the line `from __future__ import annotations`, which opts in to Postponed "
"Evaluation of Annotations (PEP 563)" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(A ):
__A = ".".join(map(A ,sys.version_info[:3] ) )
raise RuntimeError(
f'''Type resolution failed for {dtype} on Python {python_version}. Try removing the '''
"line `from __future__ import annotations`, which opts in to union types as "
"`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
"support Python versions lower than 3.10, you need to use "
"`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
"`X | None`." ) from ex
raise
for field in dataclasses.fields(A ):
if not field.init:
continue
__A = type_hints[field.name]
self._parse_dataclass_field(A ,A )
def UpperCamelCase_ ( self : Union[str, Any] ,A : List[Any]=None ,A : List[Any]=False ,A : Optional[Any]=True ,A : Union[str, Any]=None ,A : Union[str, Any]=None ,):
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
__A = []
if args_filename:
args_files.append(Path(A ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
__A = ArgumentParser()
args_file_parser.add_argument(A ,type=A ,action="append" )
# Use only remaining args for further parsing (remove the args_file_flag)
__A , __A = args_file_parser.parse_known_args(args=A )
__A = vars(A ).get(args_file_flag.lstrip("-" ) ,A )
if cmd_args_file_paths:
args_files.extend([Path(A ) for p in cmd_args_file_paths] )
__A = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
__A = file_args + args if args is not None else file_args + sys.argv[1:]
__A , __A = self.parse_known_args(args=A )
__A = []
for dtype in self.dataclass_types:
__A = {f.name for f in dataclasses.fields(A ) if f.init}
__A = {k: v for k, v in vars(A ).items() if k in keys}
for k in keys:
delattr(A ,A )
__A = dtype(**A )
outputs.append(A )
if len(namespace.__dict__ ) > 0:
# leftover attributes form an additional namespace.
outputs.append(A )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' )
return (*outputs,)
def UpperCamelCase_ ( self : Dict ,A : Dict[str, Any] ,A : bool = False ):
__A = set(args.keys() )
__A = []
for dtype in self.dataclass_types:
__A = {f.name for f in dataclasses.fields(A ) if f.init}
__A = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
__A = dtype(**A )
outputs.append(A )
if not allow_extra_keys and unused_keys:
raise ValueError(f'''Some keys are not used by the HfArgumentParser: {sorted(A )}''' )
return tuple(A )
def UpperCamelCase_ ( self : List[str] ,A : str ,A : bool = False ):
with open(Path(A ) ,encoding="utf-8" ) as open_json_file:
__A = json.loads(open_json_file.read() )
__A = self.parse_dict(A ,allow_extra_keys=A )
return tuple(A )
def UpperCamelCase_ ( self : int ,A : str ,A : bool = False ):
__A = self.parse_dict(yaml.safe_load(Path(A ).read_text() ) ,allow_extra_keys=A )
return tuple(A )
| 15 | 1 |
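The branch above gives boolean dataclass fields a `--no_<name>` complement that is registered after the main flag. As a minimal standalone sketch of the same argparse pattern (the `debug` field name is made up, not from the snippet):

```python
import argparse

def string_to_bool(v: str) -> bool:
    # argparse's type=bool would treat any non-empty string as True, so parse explicitly
    if v.lower() in ("yes", "true", "t", "1"):
        return True
    if v.lower() in ("no", "false", "f", "0"):
        return False
    raise argparse.ArgumentTypeError(f"Not a boolean: {v}")

parser = argparse.ArgumentParser()
# nargs="?" lets a bare `--debug` mean True via const; an absent flag falls back to default
parser.add_argument("--debug", type=string_to_bool, nargs="?", const=True, default=False)
# the complement is registered AFTER the main flag so the main flag's default takes effect
parser.add_argument("--no_debug", action="store_false", dest="debug")

print(parser.parse_args([]).debug)              # False
print(parser.parse_args(["--debug"]).debug)     # True
print(parser.parse_args(["--no_debug"]).debug)  # False
```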
'''simple docstring'''
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowerCamelCase : Union[str, Any] = get_logger(__name__)
class A__ :
def __init__( self : Any , _a : Optional[str] = None ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =(
os.path.join(_a , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
_SCREAMING_SNAKE_CASE =Extractor
def A ( self : List[Any] , _a : str ) -> str:
'''simple docstring'''
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
_SCREAMING_SNAKE_CASE =os.path.abspath(_a )
return os.path.join(self.extract_dir , hash_url_to_filename(_a ) )
def A ( self : str , _a : str , _a : bool ) -> bool:
'''simple docstring'''
return force_extract or (
not os.path.isfile(_a ) and not (os.path.isdir(_a ) and os.listdir(_a ))
)
def A ( self : List[str] , _a : str , _a : bool = False ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.extractor.infer_extractor_format(_a )
if not extractor_format:
return input_path
_SCREAMING_SNAKE_CASE =self._get_output_path(_a )
if self._do_extract(_a , _a ):
self.extractor.extract(_a , _a , _a )
return output_path
class A__ ( A__ ):
@classmethod
@abstractmethod
def A ( cls : Any , _a : Union[Path, str] , **_a : Tuple ) -> bool:
'''simple docstring'''
...
@staticmethod
@abstractmethod
def A ( _a : Union[Path, str] , _a : Union[Path, str] ) -> None:
'''simple docstring'''
...
class A__ ( A__ , A__ ):
A__ = []
@staticmethod
def A ( _a : Union[Path, str] , _a : int ) -> Dict:
'''simple docstring'''
with open(_a , 'rb' ) as f:
return f.read(_a )
@classmethod
def A ( cls : Any , _a : Union[Path, str] , _a : bytes = b"" ) -> bool:
'''simple docstring'''
if not magic_number:
_SCREAMING_SNAKE_CASE =max(len(_a ) for cls_magic_number in cls.magic_numbers )
try:
_SCREAMING_SNAKE_CASE =cls.read_magic_number(_a , _a )
except OSError:
return False
return any(magic_number.startswith(_a ) for cls_magic_number in cls.magic_numbers )
class A__ ( A__ ):
@classmethod
def A ( cls : Optional[Any] , _a : Union[Path, str] , **_a : int ) -> bool:
'''simple docstring'''
return tarfile.is_tarfile(_a )
@staticmethod
def A ( _a : str , _a : int ) -> Any:
'''simple docstring'''
def resolved(_a : str ) -> str:
return os.path.realpath(os.path.abspath(_a ) )
def badpath(_a : str , _a : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(_a , _a ) ).startswith(_a )
def badlink(_a : Tuple , _a : str ) -> bool:
# Links are interpreted relative to the directory containing the link
_SCREAMING_SNAKE_CASE =resolved(os.path.join(_a , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=_a )
_SCREAMING_SNAKE_CASE =resolved(_a )
for finfo in members:
if badpath(finfo.name , _a ):
logger.error(f"Extraction of {finfo.name} is blocked (illegal path)" )
elif finfo.issym() and badlink(_a , _a ):
logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}" )
elif finfo.islnk() and badlink(_a , _a ):
logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}" )
else:
yield finfo
@staticmethod
def A ( _a : Union[Path, str] , _a : Union[Path, str] ) -> None:
'''simple docstring'''
os.makedirs(_a , exist_ok=_a )
_SCREAMING_SNAKE_CASE =tarfile.open(_a )
tar_file.extractall(_a , members=TarExtractor.safemembers(_a , _a ) )
tar_file.close()
class A__ ( A__ ):
A__ = [b'\x1F\x8B']
@staticmethod
def A ( _a : Union[Path, str] , _a : Union[Path, str] ) -> None:
'''simple docstring'''
with gzip.open(_a , 'rb' ) as gzip_file:
with open(_a , 'wb' ) as extracted_file:
shutil.copyfileobj(_a , _a )
class A__ ( A__ ):
A__ = [
b'PK\x03\x04',
b'PK\x05\x06', # empty archive
b'PK\x07\x08', # spanned archive
]
@classmethod
def A ( cls : str , _a : Union[Path, str] , _a : bytes = b"" ) -> bool:
'''simple docstring'''
if super().is_extractable(_a , magic_number=_a ):
return True
try:
# Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(_a , 'rb' ) as fp:
_SCREAMING_SNAKE_CASE =_EndRecData(_a )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
_SCREAMING_SNAKE_CASE =fp.read(_a ) # CD is where we expect it to be
if len(_a ) == sizeCentralDir:
_SCREAMING_SNAKE_CASE =struct.unpack(_a , _a ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def A ( _a : Union[Path, str] , _a : Union[Path, str] ) -> None:
'''simple docstring'''
os.makedirs(_a , exist_ok=_a )
with zipfile.ZipFile(_a , 'r' ) as zip_file:
zip_file.extractall(_a )
zip_file.close()
class A__ ( A__ ):
A__ = [b'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def A ( _a : Union[Path, str] , _a : Union[Path, str] ) -> None:
'''simple docstring'''
with lzma.open(_a ) as compressed_file:
with open(_a , 'wb' ) as extracted_file:
shutil.copyfileobj(_a , _a )
class A__ ( A__ ):
A__ = [b'Rar!\x1a\x07\x00', b'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def A ( _a : Union[Path, str] , _a : Union[Path, str] ) -> None:
'''simple docstring'''
if not config.RARFILE_AVAILABLE:
raise ImportError('Please pip install rarfile' )
import rarfile
os.makedirs(_a , exist_ok=_a )
_SCREAMING_SNAKE_CASE =rarfile.RarFile(_a )
rf.extractall(_a )
rf.close()
class A__ ( A__ ):
A__ = [b'\x28\xb5\x2F\xFD']
@staticmethod
def A ( _a : Union[Path, str] , _a : Union[Path, str] ) -> None:
'''simple docstring'''
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('Please pip install zstandard' )
import zstandard as zstd
_SCREAMING_SNAKE_CASE =zstd.ZstdDecompressor()
with open(_a , 'rb' ) as ifh, open(_a , 'wb' ) as ofh:
dctx.copy_stream(_a , _a )
class A__ ( A__ ):
A__ = [b'\x42\x5A\x68']
@staticmethod
def A ( _a : Union[Path, str] , _a : Union[Path, str] ) -> None:
'''simple docstring'''
with bza.open(_a , 'rb' ) as compressed_file:
with open(_a , 'wb' ) as extracted_file:
shutil.copyfileobj(_a , _a )
class A__ ( A__ ):
A__ = [b'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def A ( _a : Union[Path, str] , _a : Union[Path, str] ) -> None:
'''simple docstring'''
if not config.PY7ZR_AVAILABLE:
raise ImportError('Please pip install py7zr' )
import pyazr
os.makedirs(_a , exist_ok=_a )
with pyazr.SevenZipFile(_a , 'r' ) as archive:
archive.extractall(_a )
class A__ ( A__ ):
A__ = [b'\x04\x22\x4D\x18']
@staticmethod
def A ( _a : Union[Path, str] , _a : Union[Path, str] ) -> None:
'''simple docstring'''
if not config.LZ4_AVAILABLE:
raise ImportError('Please pip install lz4' )
import lza.frame
with lza.frame.open(_a , 'rb' ) as compressed_file:
with open(_a , 'wb' ) as extracted_file:
shutil.copyfileobj(_a , _a )
class A__ :
# Check zip last: its detection can wrongly match files of other formats (e.g. tar or gzip) as zip
A__ = {
'tar': TarExtractor,
'gzip': GzipExtractor,
'zip': ZipExtractor,
'xz': XzExtractor,
'rar': RarExtractor,
'zstd': ZstdExtractor,
'bz2': BzipaExtractor,
'7z': SevenZipExtractor, # <Added version="2.4.0"/>
'lz4': LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def A ( cls : List[Any] ) -> Dict:
'''simple docstring'''
return max(
len(_a )
for extractor in cls.extractors.values()
if issubclass(_a , _a )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def A ( _a : Union[Path, str] , _a : int ) -> Any:
'''simple docstring'''
try:
return MagicNumberBaseExtractor.read_magic_number(_a , magic_number_length=_a )
except OSError:
return b""
@classmethod
def A ( cls : List[Any] , _a : Union[Path, str] , _a : bool = False ) -> bool:
'''simple docstring'''
warnings.warn(
'Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
'Use \'infer_extractor_format\' instead.' , category=_a , )
_SCREAMING_SNAKE_CASE =cls.infer_extractor_format(_a )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def A ( cls : Tuple , _a : Union[Path, str] ) -> str: # <Added version="2.4.0"/>
'''simple docstring'''
_SCREAMING_SNAKE_CASE =cls._get_magic_number_max_length()
_SCREAMING_SNAKE_CASE =cls._read_magic_number(_a , _a )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(_a , magic_number=_a ):
return extractor_format
@classmethod
def A ( cls : int , _a : Union[Path, str] , _a : Union[Path, str] , _a : Optional[str] = None , _a : Optional[BaseExtractor] = "deprecated" , ) -> None:
'''simple docstring'''
os.makedirs(os.path.dirname(_a ) , exist_ok=_a )
# Prevent parallel extractions
_SCREAMING_SNAKE_CASE =str(Path(_a ).with_suffix('.lock' ) )
with FileLock(_a ):
shutil.rmtree(_a , ignore_errors=_a )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(_a , _a ): # passed as positional arg
warnings.warn(
'Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
'Use \'extractor_format\' instead.' , category=_a , )
_SCREAMING_SNAKE_CASE =extractor if extractor != 'deprecated' else extractor_format
else:
_SCREAMING_SNAKE_CASE =cls.extractors[extractor_format]
return extractor.extract(_a , _a )
else:
warnings.warn(
'Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '
'exception in 3.0.0.' , category=_a , )
for extractor in cls.extractors.values():
if extractor.is_extractable(_a ):
return extractor.extract(_a , _a )
| 370 |
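The registry above dispatches on leading magic bytes. A minimal sketch of that sniffing idea, reusing three of the signatures listed in the snippet (the lookup is simplified; real extractors such as tar also need content checks beyond a fixed prefix):

```python
from typing import Optional

# Same signatures as listed in the snippet above; the file path is hypothetical.
MAGIC_NUMBERS = {
    "gzip": [b"\x1f\x8b"],
    "zip": [b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08"],
    "zstd": [b"\x28\xb5\x2f\xfd"],
}

def infer_format(path: str) -> Optional[str]:
    """Return the first format whose magic number matches the file header."""
    max_len = max(len(m) for magics in MAGIC_NUMBERS.values() for m in magics)
    with open(path, "rb") as f:
        head = f.read(max_len)
    for fmt, magics in MAGIC_NUMBERS.items():
        if any(head.startswith(m) for m in magics):
            return fmt
    return None
```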
'''simple docstring'''
import os
def _lowerCAmelCase ( ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =os.path.dirname(os.path.realpath(_UpperCamelCase ) )
_SCREAMING_SNAKE_CASE =os.path.join(_UpperCamelCase , 'triangle.txt' )
with open(_UpperCamelCase ) as f:
_SCREAMING_SNAKE_CASE =f.readlines()
_SCREAMING_SNAKE_CASE =[]
for line in triangle:
_SCREAMING_SNAKE_CASE =[]
for number in line.strip().split(' ' ):
numbers_from_line.append(int(_UpperCamelCase ) )
a.append(_UpperCamelCase )
for i in range(1 , len(_UpperCamelCase ) ):
for j in range(len(a[i] ) ):
_SCREAMING_SNAKE_CASE =a[i - 1][j] if j != len(a[i - 1] ) else 0
_SCREAMING_SNAKE_CASE =a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(_UpperCamelCase , _UpperCamelCase )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
| 114 | 0 |
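The loop above is the standard top-down accumulation for the maximum triangle path sum. A self-contained sketch of the same recurrence on a small inline triangle (made up here, rather than read from `triangle.txt`):

```python
# Same recurrence as above, but on an inline triangle instead of triangle.txt.
triangle = [
    [3],
    [7, 4],
    [2, 4, 6],
    [8, 5, 9, 3],
]
for i in range(1, len(triangle)):
    for j in range(len(triangle[i])):
        # each entry absorbs the better of its two parents in the row above
        above_left = triangle[i - 1][j - 1] if j > 0 else 0
        above_right = triangle[i - 1][j] if j != len(triangle[i - 1]) else 0
        triangle[i][j] += max(above_left, above_right)
print(max(triangle[-1]))  # 23, via the path 3 -> 7 -> 4 -> 9
```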
'''simple docstring'''
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase ( lowercase : Any , lowercase : Tuple , lowercase : Optional[Any] ) -> Optional[Any]:
# Initialise PyTorch model
_a = TaConfig.from_json_file(lowercase )
print(F'Building PyTorch model from configuration: {config}' )
_a = TaForConditionalGeneration(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(lowercase , lowercase , lowercase )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(lowercase )
if __name__ == "__main__":
lowerCAmelCase_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase_ : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 63 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : str = LayoutLMTokenizer
lowerCAmelCase : Tuple = LayoutLMTokenizerFast
lowerCAmelCase : List[Any] = True
lowerCAmelCase : int = True
def __lowercase ( self : Dict ):
super().setUp()
_a : int = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_a : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __lowercase ( self : Dict ,**_UpperCAmelCase : List[str] ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname ,**_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : Tuple ):
_a : Optional[int] = 'UNwant\u00E9d,running'
_a : List[Any] = 'unwanted, running'
return input_text, output_text
def __lowercase ( self : Optional[int] ):
_a : Optional[Any] = self.tokenizer_class(self.vocab_file )
_a : Optional[Any] = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_UpperCAmelCase ,['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) ,[7, 4, 5, 10, 8, 9] )
def __lowercase ( self : Optional[int] ):
pass
| 89 | 0 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
for param in module.parameters():
A : Optional[Any] = False
def lowerCAmelCase_ ( ):
'''simple docstring'''
A : List[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
A : Optional[Any] = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : List[str] = plt.imshow(snake_case__ )
fig.axes.get_xaxis().set_visible(snake_case__ )
fig.axes.get_yaxis().set_visible(snake_case__ )
plt.show()
def lowerCAmelCase_ ( ):
'''simple docstring'''
A : List[str] = datetime.now()
A : List[Any] = current_time.strftime('''%H:%M:%S''' )
return timestamp
| 311 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
lowercase : Optional[int] = [
'kernels/rwkv/wkv_cuda.cu',
'kernels/rwkv/wkv_op.cpp',
'kernels/deformable_detr/ms_deform_attn.h',
'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
'models/graphormer/algos_graphormer.pyx',
]
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
lowercase : str = argparse.ArgumentParser()
parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
lowercase : Optional[Any] = parser.parse_args()
if args.check_lib:
lowercase : List[Any] = importlib.import_module('transformers')
lowercase : str = Path(transformers_module.__file__).parent
else:
lowercase : List[Any] = Path.cwd() / 'build/lib/transformers'
if not test_custom_files_are_present(transformers_path):
raise ValueError('The built release does not contain the custom files. Fix this before going further!')
| 311 | 1 |
def lowerCAmelCase_ ( __A ) -> bool:
'''simple docstring'''
return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def lowerCAmelCase_ ( __A ) -> bool:
'''simple docstring'''
UpperCAmelCase__ = credit_card_number
UpperCAmelCase__ = 0
UpperCAmelCase__ = len(__A ) - 2
for i in range(__A, -1, -2 ):
# double the value of every second digit
UpperCAmelCase__ = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e. greater than 9 (e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 16: 1 + 6 = 7),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
UpperCAmelCase__ = cc_number[:i] + str(__A ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(__A ) - 1, -1, -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def lowerCAmelCase_ ( __A ) -> bool:
'''simple docstring'''
UpperCAmelCase__ = f"""{credit_card_number} is an invalid credit card number because"""
if not credit_card_number.isdigit():
print(f"""{error_message} it has nonnumerical characters.""" )
return False
if not 13 <= len(__A ) <= 16:
print(f"""{error_message} of its length.""" )
return False
if not validate_initial_digits(__A ):
print(f"""{error_message} of its first two digits.""" )
return False
if not luhn_validation(__A ):
print(f"""{error_message} it fails the Luhn check.""" )
return False
print(f"""{credit_card_number} is a valid credit card number.""" )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
| 65 |
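The validator above doubles every second digit from the right and digit-sums any two-digit result. A compact worked form of that check, assuming the common `4111111111111111` test number:

```python
# Worked Luhn check on the well-known test number 4111111111111111.
number = "4111111111111111"
total = 0
for i, ch in enumerate(reversed(number)):
    digit = int(ch)
    if i % 2 == 1:       # every second digit from the right gets doubled
        digit *= 2
        if digit > 9:    # e.g. 8 -> 16 -> 1 + 6 = 7 (equivalently 16 - 9)
            digit -= 9
    total += digit
print(total % 10 == 0)  # True: the number passes the Luhn check
```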
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
A : str = logging.get_logger(__name__)
class __A( a ):
def __init__( self , **_snake_case ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''bs4'''] )
super().__init__(**_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int:
'''simple docstring'''
__a = []
__a = []
__a = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
__a = parent.find_all(child.name , recursive=_snake_case )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(_snake_case ) else next(i for i, s in enumerate(_snake_case , 1 ) if s is child ) )
__a = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = BeautifulSoup(_snake_case , '''html.parser''' )
__a = []
__a = []
__a = []
for element in html_code.descendants:
if type(_snake_case ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
__a = html.unescape(_snake_case ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(_snake_case )
__a , __a = self.xpath_soup(_snake_case )
stringaxtag_seq.append(_snake_case )
stringaxsubs_seq.append(_snake_case )
if len(_snake_case ) != len(_snake_case ):
raise ValueError('''Number of doc strings and xtags does not correspond''' )
if len(_snake_case ) != len(_snake_case ):
raise ValueError('''Number of doc strings and xsubs does not correspond''' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = ''''''
for tagname, subs in zip(_snake_case , _snake_case ):
xpath += F"""/{tagname}"""
if subs != 0:
xpath += F"""[{subs}]"""
return xpath
def __call__( self , _snake_case ) -> BatchFeature:
'''simple docstring'''
__a = False
# Check that strings has a valid type
if isinstance(_snake_case , _snake_case ):
__a = True
elif isinstance(_snake_case , (list, tuple) ):
if len(_snake_case ) == 0 or isinstance(html_strings[0] , _snake_case ):
__a = True
if not valid_strings:
raise ValueError(
'''HTML strings must be of type `str` or `List[str]` (batch of examples), '''
F"""but is of type {type(_snake_case )}.""" )
__a = bool(isinstance(_snake_case , (list, tuple) ) and (isinstance(html_strings[0] , _snake_case )) )
if not is_batched:
__a = [html_strings]
# Get nodes + xpaths
__a = []
__a = []
for html_string in html_strings:
__a , __a , __a = self.get_three_from_single(_snake_case )
nodes.append(_snake_case )
__a = []
for node, tag_list, sub_list in zip(_snake_case , _snake_case , _snake_case ):
__a = self.construct_xpath(_snake_case , _snake_case )
xpath_strings.append(_snake_case )
xpaths.append(_snake_case )
# return as Dict
__a = {'''nodes''': nodes, '''xpaths''': xpaths}
__a = BatchFeature(data=_snake_case , tensor_type=_snake_case )
return encoded_inputs
| 6 | 0 |
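The feature extractor above builds an XPath for each text node by walking `parents` and numbering same-name siblings. A minimal sketch of that walk on a tiny inline document, assuming `bs4` is installed:

```python
from bs4 import BeautifulSoup

html = "<html><body><div><p>a</p><p>b</p></div></body></html>"
soup = BeautifulSoup(html, "html.parser")
node = soup.find_all("p")[1]  # the second <p>

parts = []
child = node
for parent in node.parents:
    siblings = parent.find_all(child.name, recursive=False)
    # subscript is 0 when the tag is unique among its siblings, else 1-based
    index = 0 if len(siblings) == 1 else next(i for i, s in enumerate(siblings, 1) if s is child)
    parts.append((child.name, index))
    child = parent
parts.reverse()
xpath = "".join(f"/{tag}" + (f"[{i}]" if i else "") for tag, i in parts)
print(xpath)  # /html/body/div/p[2]
```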
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
a__ : Tuple = logging.get_logger(__name__)
a__ : int = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
a__ : Optional[Any] = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def UpperCAmelCase_( a__ , a__ , a__ , a__ , a__ , a__ ):
"""simple docstring"""
for attribute in key.split('''.''' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
SCREAMING_SNAKE_CASE : List[str] = '''lm_head'''
SCREAMING_SNAKE_CASE : str = getattr(a__ , a__ )
if weight_type is not None:
SCREAMING_SNAKE_CASE : str = getattr(a__ , a__ ).shape
else:
SCREAMING_SNAKE_CASE : str = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
SCREAMING_SNAKE_CASE : Optional[int] = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE : Optional[Any] = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE : Optional[Any] = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE : int = value
else:
SCREAMING_SNAKE_CASE : Tuple = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = []
SCREAMING_SNAKE_CASE : Optional[Any] = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE : Union[str, Any] = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE : Any = False
if "conv_layers" in name:
load_conv_layer(
a__ , a__ , a__ , a__ , hf_model.config.feat_extract_norm == '''group''' , )
SCREAMING_SNAKE_CASE : Union[str, Any] = True
else:
for key, mapped_key in MAPPING.items():
SCREAMING_SNAKE_CASE : str = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
SCREAMING_SNAKE_CASE : Any = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE : Optional[Any] = name.split(a__ )[0].split('''.''' )[-2]
SCREAMING_SNAKE_CASE : List[Any] = mapped_key.replace('''*''' , a__ )
if "weight_g" in name:
SCREAMING_SNAKE_CASE : List[Any] = '''weight_g'''
elif "weight_v" in name:
SCREAMING_SNAKE_CASE : Any = '''weight_v'''
elif "bias" in name:
SCREAMING_SNAKE_CASE : str = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
SCREAMING_SNAKE_CASE : Dict = '''weight'''
else:
SCREAMING_SNAKE_CASE : int = None
set_recursively(a__ , a__ , a__ , a__ , a__ , a__ )
continue
if not is_used:
unused_weights.append(a__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def UpperCAmelCase_( a__ , a__ , a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = full_name.split('''conv_layers.''' )[-1]
SCREAMING_SNAKE_CASE : Dict = name.split('''.''' )
SCREAMING_SNAKE_CASE : Tuple = int(items[0] )
SCREAMING_SNAKE_CASE : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
SCREAMING_SNAKE_CASE : Any = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
SCREAMING_SNAKE_CASE : Any = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
SCREAMING_SNAKE_CASE : Tuple = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
SCREAMING_SNAKE_CASE : List[str] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(a__ )
@torch.no_grad()
def UpperCAmelCase_( a__ , a__ , a__=None , a__=None , a__=True ):
"""simple docstring"""
if config_path is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = UniSpeechConfig.from_pretrained(a__ )
else:
SCREAMING_SNAKE_CASE : List[Any] = UniSpeechConfig()
if is_finetuned:
if dict_path:
SCREAMING_SNAKE_CASE : Tuple = Dictionary.load_from_json(a__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
SCREAMING_SNAKE_CASE : List[str] = target_dict.pad_index
SCREAMING_SNAKE_CASE : Optional[int] = target_dict.bos_index
SCREAMING_SNAKE_CASE : List[str] = target_dict.eos_index
SCREAMING_SNAKE_CASE : Tuple = len(target_dict.symbols )
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(a__ , '''vocab.json''' )
if not os.path.isdir(a__ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(a__ ) )
return
os.makedirs(a__ , exist_ok=a__ )
SCREAMING_SNAKE_CASE : str = target_dict.indices
# fairseq has the <pad> and <s> switched
SCREAMING_SNAKE_CASE : Tuple = 42
SCREAMING_SNAKE_CASE : List[Any] = 43
with open(a__ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(a__ , a__ )
SCREAMING_SNAKE_CASE : Dict = WavaVecaPhonemeCTCTokenizer(
a__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=a__ , )
SCREAMING_SNAKE_CASE : int = True if config.feat_extract_norm == '''layer''' else False
SCREAMING_SNAKE_CASE : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=a__ , return_attention_mask=a__ , )
SCREAMING_SNAKE_CASE : str = WavaVecaProcessor(feature_extractor=a__ , tokenizer=a__ )
processor.save_pretrained(a__ )
SCREAMING_SNAKE_CASE : List[str] = UniSpeechForCTC(a__ )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = UniSpeechForPreTraining(a__ )
if is_finetuned:
SCREAMING_SNAKE_CASE : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
else:
SCREAMING_SNAKE_CASE : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
SCREAMING_SNAKE_CASE : Optional[Any] = model[0].eval()
recursively_load_weights(a__ , a__ , a__ )
hf_unispeech.save_pretrained(a__ )
if __name__ == "__main__":
a__ : Any = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
a__ : Dict = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 369 |
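The conversion loop above renames fairseq weights into the HF layout via the `MAPPING` table, with `*` standing in for the layer index recovered from the original key. A toy sketch of just that substitution (the key names here are made up, and the weight suffix is handled separately in the real script):

```python
# Toy version of the MAPPING-driven rename above; key names are made up.
MAPPING = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}

def remap(name: str) -> str:
    for key, mapped in MAPPING.items():
        if key in name:
            layer = name.split(key)[0].split(".")[-2]  # layer index, e.g. "3"
            return mapped.replace("*", layer)
    return name

print(remap("encoder.layers.3.self_attn.k_proj"))
# -> encoder.layers.3.attention.k_proj
```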
from math import pi, sqrt, tan
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if side_length < 0:
raise ValueError('''surface_area_cube() only accepts non-negative values''' )
return 6 * side_length**2
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
if length < 0 or breadth < 0 or height < 0:
raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if radius < 0:
raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
return 4 * pi * radius**2
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if radius < 0:
raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
return 3 * pi * radius**2
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError('''surface_area_cone() only accepts non-negative values''' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'''surface_area_conical_frustum() only accepts non-negative values''' )
SCREAMING_SNAKE_CASE : Optional[Any] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
return 2 * pi * radius * (height + radius)
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if torus_radius < 0 or tube_radius < 0:
raise ValueError('''surface_area_torus() only accepts non-negative values''' )
if torus_radius < tube_radius:
raise ValueError(
'''surface_area_torus() does not support spindle or self intersecting tori''' )
return 4 * pow(a__ , 2 ) * torus_radius * tube_radius
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if length < 0 or width < 0:
raise ValueError('''area_rectangle() only accepts non-negative values''' )
return length * width
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if side_length < 0:
raise ValueError('''area_square() only accepts non-negative values''' )
return side_length**2
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError('''area_triangle() only accepts non-negative values''' )
return (base * height) / 2
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('''Given three sides do not form a triangle''' )
SCREAMING_SNAKE_CASE : int = (sidea + sidea + sidea) / 2
SCREAMING_SNAKE_CASE : List[str] = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError('''area_parallelogram() only accepts non-negative values''' )
return base * height
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
if basea < 0 or basea < 0 or height < 0:
raise ValueError('''area_trapezium() only accepts non-negative values''' )
return 1 / 2 * (basea + basea) * height
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if radius < 0:
raise ValueError('''area_circle() only accepts non-negative values''' )
return pi * radius**2
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if radius_x < 0 or radius_y < 0:
raise ValueError('''area_ellipse() only accepts non-negative values''' )
return pi * radius_x * radius_y
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('''area_rhombus() only accepts non-negative values''' )
return 1 / 2 * diagonal_a * diagonal_a
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if not isinstance(a__ , a__ ) or sides < 3:
raise ValueError(
'''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''' )
elif length < 0:
raise ValueError(
'''area_reg_polygon() only accepts non-negative values as \
length of a side''' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(F"Rectangle: {area_rectangle(10, 20) = }")
print(F"Square: {area_square(10) = }")
print(F"Triangle: {area_triangle(10, 10) = }")
print(F"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(F"Parallelogram: {area_parallelogram(10, 20) = }")
print(F"Rhombus: {area_rhombus(10, 20) = }")
print(F"Trapezium: {area_trapezium(10, 20, 30) = }")
print(F"Circle: {area_circle(20) = }")
print(F"Ellipse: {area_ellipse(10, 20) = }")
print('''\nSurface Areas of various geometric shapes: \n''')
print(F"Cube: {surface_area_cube(20) = }")
print(F"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(F"Sphere: {surface_area_sphere(20) = }")
print(F"Hemisphere: {surface_area_hemisphere(20) = }")
print(F"Cone: {surface_area_cone(10, 20) = }")
print(F"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(F"Cylinder: {surface_area_cylinder(10, 20) = }")
print(F"Torus: {surface_area_torus(20, 10) = }")
print(F"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(F"Square: {area_reg_polygon(4, 10) = }")
print(F"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 19 | 0 |
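Two quick sanity checks for the formulas above: the regular-polygon area A = n·s²/(4·tan(π/n)) reduces to s² for a square, and Heron's formula recovers the area of the 5-12-13 right triangle:

```python
from math import pi, sqrt, tan

# Regular polygon with n = 4 sides of length 10 must equal the square area.
side, n = 10, 4
assert abs((n * side**2) / (4 * tan(pi / n)) - side**2) < 1e-9

# Heron's formula on the 5-12-13 right triangle (area = 5 * 12 / 2 = 30).
s = (5 + 12 + 13) / 2
assert sqrt(s * (s - 5) * (s - 12) * (s - 13)) == 30.0
print("formula checks passed")
```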
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
lowerCamelCase = {
'n_samples': 64,
'horizon': 32,
'num_inference_steps': 20,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
lowerCamelCase = 'hopper-medium-v2'
lowerCamelCase = gym.make(env_name)
lowerCamelCase = ValueGuidedRLPipeline.from_pretrained(
'bglick13/hopper-medium-v2-value-function-hor32',
env=env,
)
env.seed(0)
lowerCamelCase = env.reset()
lowerCamelCase = 0
lowerCamelCase = 0
lowerCamelCase = 10_00
lowerCamelCase = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
lowerCamelCase = pipeline(obs, planning_horizon=32)
# execute action in environment
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = env.step(denorm_actions)
lowerCamelCase = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F"""Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"""
F""" {total_score}"""
)
# save observations for rendering
rollout.append(next_observation.copy())
lowerCamelCase = next_observation
except KeyboardInterrupt:
pass
print(F"""Total reward: {total_reward}""")
| 199 |
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
lowerCamelCase = 5_00_03
lowerCamelCase = 5_00_02
@require_sentencepiece
@require_tokenizers
class A ( UpperCamelCase_ , unittest.TestCase ):
UpperCamelCase__ : int =PLBartTokenizer
UpperCamelCase__ : Dict =None
UpperCamelCase__ : Optional[Any] =False
def lowerCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCamelCase : Any =PLBartTokenizer(lowercase_ , language_codes='base' , keep_accents=lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
_lowerCamelCase : List[str] =PLBartTokenizer(lowercase_ , language_codes='base' , keep_accents=lowercase_ )
_lowerCamelCase : Optional[int] =tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCamelCase : str =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
_lowerCamelCase : List[Any] =tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCamelCase : Optional[Any] =tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
_lowerCamelCase : str =tokenizer.vocab_size
_lowerCamelCase : List[str] =[tokenizer.convert_ids_to_tokens(lowercase_ ) for x in range(end - 4 , lowercase_ )]
self.assertListEqual(lowercase_ , ['__java__', '__python__', '__en_XX__', '<mask>'] )
_lowerCamelCase : Optional[Any] ='java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
_lowerCamelCase : Dict =tokenizer(lowercase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ ) , lowercase_ , )
def lowerCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
_lowerCamelCase : Tuple =PLBartTokenizer(lowercase_ , language_codes='multi' , keep_accents=lowercase_ )
_lowerCamelCase : Any =tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCamelCase : Optional[Any] =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
_lowerCamelCase : Dict =tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCamelCase : Optional[Any] =tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
_lowerCamelCase : Dict =tokenizer.vocab_size
_lowerCamelCase : Optional[int] =[tokenizer.convert_ids_to_tokens(lowercase_ ) for x in range(end - 7 , lowercase_ )]
self.assertListEqual(
lowercase_ , ['__java__', '__python__', '__en_XX__', '__javascript__', '__php__', '__ruby__', '__go__'] )
_lowerCamelCase : int ='java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
_lowerCamelCase : Any =tokenizer(lowercase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ ) , lowercase_ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
UpperCamelCase__ : List[Any] ='uclanlp/plbart-python-en_XX'
UpperCamelCase__ : List[str] =[
'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
]
UpperCamelCase__ : Optional[int] =[
'Returns the maximum value of a b c.',
'Sums the values of a b c.',
]
UpperCamelCase__ : str =[
134,
5452,
33460,
33441,
33463,
33465,
33463,
33449,
988,
20,
33456,
19,
33456,
771,
39,
4258,
889,
3318,
33441,
33463,
33465,
33463,
33449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def lowerCamelCase ( cls : str ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase : PLBartTokenizer =PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes='base' , src_lang='python' , tgt_lang='en_XX' )
_lowerCamelCase : Any =1
return cls
def lowerCamelCase ( self : List[str] ) -> Any:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__java__'] , 5_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__python__'] , 5_0002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__en_XX__'] , 5_0003 )
def lowerCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
_lowerCamelCase : List[Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowercase_ )
def lowerCamelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
self.assertIn(lowercase_ , self.tokenizer.all_special_ids )
_lowerCamelCase : Dict =[EN_CODE, 9037, 3_3442, 57, 752, 153, 14, 56, 18, 9, 2]
_lowerCamelCase : Optional[int] =self.tokenizer.decode(lowercase_ , skip_special_tokens=lowercase_ )
_lowerCamelCase : int =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertNotIn(self.tokenizer.eos_token , lowercase_ )
def lowerCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
_lowerCamelCase : List[Any] =['def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])' * 20]
self.assertIsInstance(src_text[0] , lowercase_ )
_lowerCamelCase : Tuple =10
_lowerCamelCase : Optional[int] =self.tokenizer(lowercase_ , max_length=lowercase_ , truncation=lowercase_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowercase_ )
self.assertEqual(len(lowercase_ ) , lowercase_ )
def lowerCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', '__java__'] ) , [5_0004, 5_0001] )
def lowerCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
_lowerCamelCase : List[str] =tempfile.mkdtemp()
_lowerCamelCase : Dict =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowercase_ )
_lowerCamelCase : Any =PLBartTokenizer.from_pretrained(lowercase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowercase_ )
@require_torch
def lowerCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_lowerCamelCase : Optional[Any] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowercase_ , return_tensors='pt' )
_lowerCamelCase : List[Any] =shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , lowercase_ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowerCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : int =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowercase_ , truncation=lowercase_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
_lowerCamelCase : List[Any] =shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
_lowerCamelCase : List[Any] =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowercase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowerCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Tuple =self.tokenizer(self.src_text , padding=lowercase_ , truncation=lowercase_ , max_length=3 , return_tensors='pt' )
_lowerCamelCase : Dict =self.tokenizer(
text_target=self.tgt_text , padding=lowercase_ , truncation=lowercase_ , max_length=10 , return_tensors='pt' )
_lowerCamelCase : List[str] =targets['input_ids']
_lowerCamelCase : Optional[Any] =shift_tokens_right(lowercase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowerCamelCase ( self : Any ) -> Any:
"""simple docstring"""
_lowerCamelCase : Any =self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='java' )
self.assertEqual(
nested_simplify(lowercase_ ) , {
# A, test, EOS, en_XX
'input_ids': [[150, 242, 2, 5_0003]],
'attention_mask': [[1, 1, 1, 1]],
# java
'forced_bos_token_id': 5_0001,
} , )
| 199 | 1 |
'''simple docstring'''
from __future__ import annotations
a__ : List[Any] = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class UpperCAmelCase__ :
def __init__( self , lowercase , lowercase ) -> None:
__UpperCamelCase = graph
# mapping node to its parent in resulting breadth first tree
__UpperCamelCase = {}
__UpperCamelCase = source_vertex
def __lowerCamelCase ( self ) -> None:
__UpperCamelCase = {self.source_vertex}
__UpperCamelCase = None
__UpperCamelCase = [self.source_vertex] # first in first out queue
while queue:
__UpperCamelCase = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(snake_case__ )
__UpperCamelCase = vertex
queue.append(snake_case__ )
def __lowerCamelCase ( self , lowercase ) -> str:
if target_vertex == self.source_vertex:
return self.source_vertex
__UpperCamelCase = self.parent.get(snake_case__ )
if target_vertex_parent is None:
__UpperCamelCase = (
f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
)
raise ValueError(snake_case__ )
return self.shortest_path(snake_case__ ) + f"->{target_vertex}"
if __name__ == "__main__":
a__ : Dict = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
| 360 |
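`shortest_path` above rebuilds the route by following the parent pointers recorded during the search. The same idea as a compact sketch on the graph defined above, with source vertex `G` and target `D`:

```python
from collections import deque

graph = {"A": ["B", "C", "E"], "B": ["A", "D", "E"], "C": ["A", "F", "G"],
         "D": ["B"], "E": ["A", "B", "D"], "F": ["C"], "G": ["C"]}

parent = {"G": None}          # source vertex has no parent
queue = deque(["G"])
while queue:
    v = queue.popleft()
    for w in graph[v]:
        if w not in parent:   # not visited yet
            parent[w] = v
            queue.append(w)

# Walk parent pointers back from the target to the source.
path, v = [], "D"
while v is not None:
    path.append(v)
    v = parent[v]
print("->".join(reversed(path)))  # G->C->A->B->D
```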
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = None
# Automatically constructed
__SCREAMING_SNAKE_CASE = "dict"
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = field(default='''Translation''' , init=UpperCAmelCase_ , repr=UpperCAmelCase_)
def __call__( self ) -> Optional[Any]:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def __lowerCamelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
# Automatically constructed
__SCREAMING_SNAKE_CASE = "dict"
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = field(default='''TranslationVariableLanguages''' , init=UpperCAmelCase_ , repr=UpperCAmelCase_)
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = sorted(set(self.languages ) ) if self.languages else None
__UpperCamelCase = len(self.languages ) if self.languages else None
def __call__( self ) -> Any:
return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
def __lowerCamelCase ( self , lowercase ) -> Any:
__UpperCamelCase = set(self.languages )
if self.languages and set(lowercase ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(lowercase ) - lang_set ) )}) are not in valid set ({', '.join(lowercase )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__UpperCamelCase = []
for lang, text in translation_dict.items():
if isinstance(lowercase , lowercase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__UpperCamelCase , __UpperCamelCase = zip(*sorted(lowercase ) )
return {"language": languages, "translation": translations}
def __lowerCamelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value("""string""" ) ),
"translation": Sequence(Value("""string""" ) ),
}
| 243 | 0 |
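`TranslationVariableLanguages` above flattens a language-to-text mapping into two parallel, language-sorted lists, expanding lists of alternative translations along the way. A small sketch of just that flattening step (the example strings are made up):

```python
# Flattening step used above: dict of language -> text (or list of texts)
# becomes two parallel lists sorted by language code.
translation_dict = {"de": "Hallo", "en": ["Hello", "Hi"]}

pairs = []
for lang, text in translation_dict.items():
    if isinstance(text, str):
        pairs.append((lang, text))
    else:
        pairs.extend((lang, t) for t in text)

languages, translations = zip(*sorted(pairs))
print(languages)     # ('de', 'en', 'en')
print(translations)  # ('Hallo', 'Hello', 'Hi')
```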
"""simple docstring"""
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
A__ = [1]
for i in range(2 , __lowerCamelCase ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
A__ = []
A__ = list(range(__lowerCamelCase ) )
# Find permutation
while factorials:
A__ = factorials.pop()
A__ = divmod(__lowerCamelCase , __lowerCamelCase )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
| 221 |
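The function above decodes `k` in the factorial number system: repeated divmod by (n−1)!, (n−2)!, … selects one remaining element per step. A worked trace with `n = 3`, using `math.factorial` instead of a precomputed list:

```python
from math import factorial

n, k = 3, 4  # the 4th (0-indexed) permutation of [0, 1, 2]
elements = list(range(n))
perm = []
for i in range(n - 1, 0, -1):
    # the quotient picks the next element; the remainder is decoded next round
    index, k = divmod(k, factorial(i))
    perm.append(elements.pop(index))
perm.append(elements[0])
print(perm)  # [2, 0, 1] -- order: 012, 021, 102, 120, 201, 210
```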
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 114 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowercase__ = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use CLIPImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 280 |
'''simple docstring'''
def actual_power(a: int, b: int):
    """Compute a**b recursively by halving the exponent (b >= 0 semantics, see power)."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """Compute a**b, handling negative exponents via the reciprocal."""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
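# Worked check of the squaring recurrence above: odd exponents peel one factor
# of a off, even exponents square the half-power, so (-2)**-3 resolves to
# 1 / (-2)**3 == -0.125 through power()'s reciprocal branch.
if __name__ == "__main__":
    assert power(2, 10) == 1024
    assert power(-2, -3) == -0.125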
| 280 | 1 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module):
    """Disable gradient updates for every parameter of `module`."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    """Pick the best available torch device: CUDA, then MPS, then CPU."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    """Render `image` with matplotlib, hiding both axes."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    """Return the current time formatted as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
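# Minimal usage sketch (the helper names above are reconstructed from the
# bodies, so treat them as illustrative rather than the original API):
if __name__ == "__main__":
    device = get_device()
    print(f"[{get_timestamp()}] running on {device}")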
| 311 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 311 | 1 |
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    """Render a benchmark-results JSON file as a collapsible Markdown table."""
    with open(input_json_file, encoding='utf-8') as f:
        results = json.load(f)

    output_md = ['<details>', '<summary>Show updated benchmarks!</summary>', ' ']

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split('/')[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = '| metric |'
        lines = '|--------|'
        value = '| new / old (diff) |'
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals['new']
            old_val = metric_vals.get('old', None)
            dif_val = metric_vals.get('diff', None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else 'None'
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append('</details>')

    with open(output_md_file, 'w', encoding='utf-8') as f:
        f.writelines('\n'.join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
| 352 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
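# Why this indirection defers work: nothing from tokenization_bertweet is
# imported until an attribute is first accessed. A minimal sketch of the same
# idea using PEP 562's module-level __getattr__ (an illustration, not
# transformers' actual _LazyModule implementation):
#
#     # my_package/__init__.py
#     import importlib
#
#     _import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}
#
#     def __getattr__(name):
#         for module_name, symbols in _import_structure.items():
#             if name in symbols:
#                 module = importlib.import_module(f".{module_name}", __name__)
#                 return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")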
| 290 | 0 |
"""simple docstring"""
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(self, number_of_queues, time_slices, queue, current_time) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()

    def calculate_sequence_of_finish_queue(self):
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue):
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue):
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue):
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue):
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process):
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue):
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue, time_slice):
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self):
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue


if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        f"waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        f"completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        f"turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print sequence of finished processes
    print(
        f"sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"
    )
| 57 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(self, prior, image_encoder, image_processor, scheduler, renderer):
        super().__init__()

        self.register_modules(
            prior=prior, image_encoder=image_encoder, image_processor=image_processor, scheduler=scheduler, renderer=renderer,
        )

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}')
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f'cuda:{gpu_id}')

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image, num_images_per_prompt=1, num_inference_steps=25, generator=None, latents=None, guidance_scale=4.0, frame_size=64, output_type="pil", return_dict=True):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}')

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim), image_embeds.dtype, device, generator, latents, self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input, timestep=t, proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred, timestep=t, sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :], device, size=frame_size, ray_batch_size=4096, n_coarse_samples=64, n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}')

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
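# The guidance update from the denoising loop above, in isolation (toy numbers,
# standalone): the guided prediction moves `guidance_scale` times the
# conditional-minus-unconditional gap away from the unconditional prediction.
if __name__ == "__main__":
    uncond, cond, scale = torch.tensor([0.1]), torch.tensor([0.3]), 4.0
    guided = uncond + scale * (cond - uncond)
    assert torch.allclose(guided, torch.tensor([0.9]))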
| 19 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"


def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool):
    """Copy/paste/tweak the X-MOD checkpoint's weights into our transformers design."""
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent), checkpoint_file=Path(xmod_checkpoint_path).name, _name="xmod_base", arch="xmod_base", task="multilingual_masked_lm", data_name_or_path=str(data_dir), bpe="sentencepiece", sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"), src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings, hidden_size=xmod.cfg.model.encoder_embed_dim, num_hidden_layers=xmod.cfg.model.encoder_layers, num_attention_heads=xmod.cfg.model.encoder_attention_heads, intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-5, pre_norm=xmod.cfg.model.encoder_normalize_before, adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2), adapter_layer_norm=xmod.cfg.model.adapter_layer_norm, adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm, ln_before_adapter=xmod.cfg.model.ln_before_adapter, languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()
# Now let's copy all the weights.
# Embeddings
A = xmod_sent_encoder.embed_tokens.weight
A = xmod_sent_encoder.embed_positions.weight
A = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
A = xmod_sent_encoder.layernorm_embedding.weight
A = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
A = model.roberta.encoder.layer[i]
A = xmod_sent_encoder.layers[i]
# self attention
A = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
A = xmod_layer.self_attn.q_proj.weight
A = xmod_layer.self_attn.q_proj.bias
A = xmod_layer.self_attn.k_proj.weight
A = xmod_layer.self_attn.k_proj.bias
A = xmod_layer.self_attn.v_proj.weight
A = xmod_layer.self_attn.v_proj.bias
# self-attention output
A = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
A = xmod_layer.self_attn.out_proj.weight
A = xmod_layer.self_attn.out_proj.bias
A = xmod_layer.self_attn_layer_norm.weight
A = xmod_layer.self_attn_layer_norm.bias
# intermediate
A = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""" )
A = xmod_layer.fca.weight
A = xmod_layer.fca.bias
# output
A = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
A = xmod_layer.fca.weight
A = xmod_layer.fca.bias
A = xmod_layer.final_layer_norm.weight
A = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
A = xmod_layer.adapter_layer_norm.weight
A = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
A = bert_output.adapter_modules[lang_code]
A = xmod_layer.adapter_modules[lang_code]
A = from_adapter.fca.weight
A = from_adapter.fca.bias
A = from_adapter.fca.weight
A = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
A = xmod_sent_encoder.layer_norm.weight
A = xmod_sent_encoder.layer_norm.bias
if classification_head:
A = xmod.model.classification_heads["""mnli"""].dense.weight
A = xmod.model.classification_heads["""mnli"""].dense.bias
A = xmod.model.classification_heads["""mnli"""].out_proj.weight
A = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
A = xmod.model.encoder.lm_head.dense.weight
A = xmod.model.encoder.lm_head.dense.bias
A = xmod.model.encoder.lm_head.layer_norm.weight
A = xmod.model.encoder.lm_head.layer_norm.bias
A = xmod.model.encoder.lm_head.weight
A = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f'max_absolute_diff = {max_absolute_diff}')  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 77 |
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    r"""
    Constructs an image processor that resizes to a shortest edge, center-crops, rescales and normalizes.
    """

    model_input_names = ['pixel_values']

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}")
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
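# A minimal usage sketch (the class name is reconstructed from the 256/224
# shortest-edge logic above, and the checkpoint name is an assumption for
# illustration; any checkpoint shipping this image processor would do):
#
#     from PIL import Image
#     processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
#     inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#     print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])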
| 77 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, out_features=self.out_features, out_indices=self.out_indices, num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='Bit does not output attentions')
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason='Bit does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='Bit does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ['preactivation', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason='Bit does not use feedforward chunking')
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
| 4 |
"""simple docstring"""
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string with Base85 (b85)."""
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(encoded: bytes) -> str:
    """Decode Base85 (b85) bytes back to a UTF-8 string."""
    return base64.b85decode(encoded).decode("utf-8")


if __name__ == "__main__":
    test = 'Hello World!'
    encoded = base85_encode(test)
    print(encoded)

    decoded = base85_decode(encoded)
    print(decoded)
| 243 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """
    For every vector in value_array, find the closest vector in dataset
    (by Euclidean distance) and return [nearest_vector, distance] pairs.
    """
    if dataset.ndim != value_array.ndim:
        msg = (
            'Wrong input data\'s dimensions... '
            f'dataset : {dataset.ndim}, value_array : {value_array.ndim}'
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                'Wrong input data\'s shape... '
                f'dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError('Wrong shape')

    if dataset.dtype != value_array.dtype:
        msg = (
            'Input data have different datatype... '
            f'dataset : {dataset.dtype}, value_array : {value_array.dtype}'
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
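# A worked example for the fixed-up similarity_search above (distances checked
# by hand): the query [0, 1] is distance 1.0 from [0, 0] and ~2.236 from
# [2, 2], so the first vector wins.
if __name__ == "__main__":
    dataset = np.array([[0, 0], [2, 2]])
    value_array = np.array([[0, 1]])
    print(similarity_search(dataset, value_array))  # [[[0, 0], 1.0]]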
| 365 |
"""simple docstring"""
def decimal_to_binary(num: int) -> str:
    """
    Convert an integer to its binary string representation, with a 0b/-0b prefix.
    """
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)

    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
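# Spot checks (decimal_to_binary is the fixed-up name used above):
assert decimal_to_binary(0) == "0b0"
assert decimal_to_binary(5) == "0b101"
assert decimal_to_binary(-5) == "-0b101"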
| 186 | 0 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for a single fold of the `glue` dataset.
    """
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = DatasetDict(
        {
            'train': dataset['train'].select(train_idxs),
            'validation': dataset['train'].select(valid_idxs),
            'test': dataset['validation'],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets['test'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset('glue', 'mrpc')
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])

    metric = evaluate.load('glue', 'mrpc')

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets['train'].num_rows), datasets['train']['label'])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator, datasets, train_idxs, valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
                metric.add_batch(
                    predictions=predictions, references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print('Average test metrics from all folds:', test_metric)


def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.',
    )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    # New Code #
    parser.add_argument('--num_folds', type=int, default=3, help='The number of splits to perform across the dataset')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
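# Why the fold ensemble above sums logits before argmax (a toy illustration,
# separate from the training script): averaged logits let one very confident
# fold outvote two mildly wrong ones, which per-fold majority voting cannot.
#
#     import torch
#     fold_logits = torch.tensor([[2.0, -1.0], [-0.1, 0.2], [-0.2, 0.1]])
#     fold_logits.mean(dim=0).argmax()         # tensor(0)
#     fold_logits.argmax(dim=1).mode().values  # tensor(1) -- the per-fold vote disagrees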
| 280 |
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal) -> tuple[float | int, list[tuple[int, int]]]:
    """Shortest path on a binary grid (1 = walkable), returning (distance, path)."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
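# A small worked example for the fixed-up dijkstra above (1 = free cell,
# 0 = wall; expected values traced by hand):
if __name__ == "__main__":
    grid = np.array([[1, 1, 1], [0, 1, 0], [0, 1, 1]])
    dist, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
    print(dist, path)  # 4.0 [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]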
| 280 | 1 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Sum of the fifth powers of the decimal digits of `number`."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Project Euler 30: sum of all numbers that equal the sum of fifth powers of their digits."""
    return sum(
        number
        for number in range(1_000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
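# Why the search can stop below 1_000_000: the largest digit-power sum a
# 7-digit number can reach is 7 * 9**5 = 413343, which has only 6 digits, so
# nothing with 7 or more digits can equal its own fifth-power digit sum.
assert 7 * 9**5 == 413343 < 10**6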
| 359 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id) -> dict:
    """Run a candidate program in a separate process and report whether it passed."""
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()
    if not result:
        result.append("timed out")
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout) -> None:
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f'failed: {e}')
        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
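# Usage sketch (Unix-only, since it relies on SIGALRM):
#   try:
#       with time_limit(1.0):
#           while True:  # spins until the one-second alarm fires
#               pass
#   except TimeoutException:
#       print("timed out")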
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield
@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    """Raised when the executed program exceeds its time limit."""

    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns False so the swallowed stdin cannot be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None) -> None:
    """Disable destructive functions before exec'ing untrusted code.

    WARNING: this is NOT a security sandbox; it only makes accidents less likely.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
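# Usage sketch: callers typically cap memory before exec'ing untrusted code,
# e.g. reliability_guard(maximum_memory_bytes=256 * 1024 * 1024)  (the 256 MB
# figure is arbitrary). Note the call permanently disables os/shutil/subprocess
# functions in the calling process, which is why unsafe_execute() above runs it
# inside a throwaway child process.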
| 63 | 0 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def dummy_image( self ):
__lowercase = 1
__lowercase = 3
__lowercase = (3_2, 3_2)
__lowercase = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0 ) ).to(UpperCAmelCase__ )
return image
@property
def dummy_cond_unet( self ):
torch.manual_seed(0 )
model = UNet2DConditionModel(
block_out_channels=(3_2, 6_4), layers_per_block=2, sample_size=3_2, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=3_2, )
return model
@property
def dummy_vae( self ):
torch.manual_seed(0 )
model = AutoencoderKL(
block_out_channels=[3_2, 6_4], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
return model
@property
def dummy_text_encoder( self ):
torch.manual_seed(0 )
config = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=3_2, intermediate_size=3_7, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_0_0_0, )
return CLIPTextModel(config )
@property
def dummy_extractor( self ):
def extract(*UpperCAmelCase__ : Tuple, **UpperCAmelCase__ : str ):
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : int ):
self.pixel_values = torch.ones([0] )
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Dict ):
self.pixel_values.to(UpperCAmelCase__ )
return self
return Out()
return extract
def _lowercase ( self : Optional[Any] ):
__lowercase = "cpu" # ensure determinism for the device-dependent torch.Generator
__lowercase = self.dummy_cond_unet
__lowercase = DDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=UpperCAmelCase__, set_alpha_to_one=UpperCAmelCase__, )
__lowercase = self.dummy_vae
__lowercase = self.dummy_text_encoder
__lowercase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
__lowercase = StableDiffusionPipeline(
unet=UpperCAmelCase__, scheduler=UpperCAmelCase__, vae=UpperCAmelCase__, text_encoder=UpperCAmelCase__, tokenizer=UpperCAmelCase__, safety_checker=UpperCAmelCase__, feature_extractor=self.dummy_extractor, )
__lowercase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = "A painting of a squirrel eating a burger"
__lowercase = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 )
__lowercase = sd_pipe([prompt], generator=UpperCAmelCase__, guidance_scale=6.0, num_inference_steps=2, output_type="np" )
__lowercase = output.images
__lowercase = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 )
__lowercase = sd_pipe(
[prompt], generator=UpperCAmelCase__, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=UpperCAmelCase__, )[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__lowercase = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
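# The golden-slice pattern used above, in isolation (values illustrative):
#   image_slice = image[0, -3:, -3:, -1]   # 3x3 corner of the last channel
#   expected_slice = np.array([...])       # values recorded from a trusted run
#   assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
# Comparing a tiny fixed slice keeps golden values small while still catching
# numerical drift in the pipeline.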
def _lowercase ( self : Union[str, Any] ):
__lowercase = "cpu" # ensure determinism for the device-dependent torch.Generator
__lowercase = self.dummy_cond_unet
__lowercase = PNDMScheduler(skip_prk_steps=UpperCAmelCase__ )
__lowercase = self.dummy_vae
__lowercase = self.dummy_text_encoder
__lowercase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
__lowercase = StableDiffusionPipeline(
unet=UpperCAmelCase__, scheduler=UpperCAmelCase__, vae=UpperCAmelCase__, text_encoder=UpperCAmelCase__, tokenizer=UpperCAmelCase__, safety_checker=UpperCAmelCase__, feature_extractor=self.dummy_extractor, )
__lowercase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = "A painting of a squirrel eating a burger"
__lowercase = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 )
__lowercase = sd_pipe([prompt], generator=UpperCAmelCase__, guidance_scale=6.0, num_inference_steps=2, output_type="np" )
__lowercase = output.images
__lowercase = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 )
__lowercase = sd_pipe(
[prompt], generator=UpperCAmelCase__, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=UpperCAmelCase__, )[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__lowercase = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowercase ( self : int ):
__lowercase = StableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=UpperCAmelCase__ )
assert isinstance(UpperCAmelCase__, UpperCAmelCase__ )
assert isinstance(pipe.scheduler, UpperCAmelCase__ )
assert pipe.safety_checker is None
__lowercase = pipe("example prompt", num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase__ )
__lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__lowercase = pipe("example prompt", num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != "cuda", "This test requires a GPU" )
def _lowercase ( self : str ):
__lowercase = self.dummy_cond_unet
__lowercase = PNDMScheduler(skip_prk_steps=UpperCAmelCase__ )
__lowercase = self.dummy_vae
__lowercase = self.dummy_text_encoder
__lowercase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# put models in fp16
__lowercase = unet.half()
__lowercase = vae.half()
__lowercase = bert.half()
# make sure here that pndm scheduler skips prk
__lowercase = StableDiffusionPipeline(
unet=UpperCAmelCase__, scheduler=UpperCAmelCase__, vae=UpperCAmelCase__, text_encoder=UpperCAmelCase__, tokenizer=UpperCAmelCase__, safety_checker=UpperCAmelCase__, feature_extractor=self.dummy_extractor, )
__lowercase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = "A painting of a squirrel eating a burger"
__lowercase = sd_pipe([prompt], num_inference_steps=2, output_type="np" ).images
assert image.shape == (1, 6_4, 6_4, 3)
@nightly
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : Dict ):
__lowercase = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=UpperCAmelCase__ )
__lowercase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__lowercase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = (
"portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
" children from bahnhof zoo, detailed "
)
__lowercase = 4_0_0_3_6_6_0_3_4_6
__lowercase = 7
# without safety guidance (sld_guidance_scale = 0)
__lowercase = torch.manual_seed(UpperCAmelCase__ )
__lowercase = sd_pipe(
[prompt], generator=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, num_inference_steps=5_0, output_type="np", width=5_1_2, height=5_1_2, sld_guidance_scale=0, )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
__lowercase = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# with safety guidance (strong configuration)
__lowercase = torch.manual_seed(UpperCAmelCase__ )
__lowercase = sd_pipe(
[prompt], generator=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, num_inference_steps=5_0, output_type="np", width=5_1_2, height=5_1_2, sld_guidance_scale=2_0_0_0, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
__lowercase = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowercase ( self : str ):
__lowercase = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=UpperCAmelCase__ )
__lowercase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__lowercase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = "padme amidala taking a bath artwork, safe for work, no nudity"
__lowercase = 2_7_3_4_9_7_1_7_5_5
__lowercase = 7
__lowercase = torch.manual_seed(UpperCAmelCase__ )
__lowercase = sd_pipe(
[prompt], generator=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, num_inference_steps=5_0, output_type="np", width=5_1_2, height=5_1_2, sld_guidance_scale=0, )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
__lowercase = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
__lowercase = torch.manual_seed(UpperCAmelCase__ )
__lowercase = sd_pipe(
[prompt], generator=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, num_inference_steps=5_0, output_type="np", width=5_1_2, height=5_1_2, sld_guidance_scale=2_0_0_0, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
__lowercase = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowercase ( self : Tuple ):
__lowercase = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" )
__lowercase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = (
"the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
" leyendecker"
)
__lowercase = 1_0_4_4_3_5_5_2_3_4
__lowercase = 1_2
__lowercase = torch.manual_seed(UpperCAmelCase__ )
__lowercase = sd_pipe(
[prompt], generator=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, num_inference_steps=5_0, output_type="np", width=5_1_2, height=5_1_2, sld_guidance_scale=0, )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
__lowercase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
__lowercase = torch.manual_seed(UpperCAmelCase__ )
__lowercase = sd_pipe(
[prompt], generator=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, num_inference_steps=5_0, output_type="np", width=5_1_2, height=5_1_2, sld_guidance_scale=2_0_0_0, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
__lowercase = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 17 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyInpaintPipeline
a__ = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
    batch_params = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
    required_optional_params = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
    test_xformers_attention = False
@property
def text_embedder_hidden_size( self ):
    return 32
@property
def time_input_dim( self ):
    return 32
@property
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return self.time_input_dim
@property
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return self.time_input_dim * 4
@property
def cross_attention_dim( self ):
    return 1_00
@property
def dummy_tokenizer( self ):
    tokenizer = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base')
    return tokenizer
@property
def dummy_text_encoder( self ):
    torch.manual_seed(0)
    config = MCLIPConfig(
        numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
    text_encoder = MultilingualCLIP(config)
    text_encoder = text_encoder.eval()
    return text_encoder
@property
def dummy_unet( self ):
    torch.manual_seed(0)
    model_kwargs = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
    model = UNet2DConditionModel(**model_kwargs)
    return model
@property
def dummy_movq_kwargs( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def dummy_movq( self ):
    torch.manual_seed(0)
    model = VQModel(**self.dummy_movq_kwargs)
    return model
def get_dummy_components( self ):
    text_encoder = self.dummy_text_encoder
    tokenizer = self.dummy_tokenizer
    unet = self.dummy_unet
    movq = self.dummy_movq
    scheduler = DDIMScheduler(
        num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type='epsilon' , thresholding=False , )
    components = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowerCamelCase_ ( self , lowercase , lowercase=0) -> Any:
'''simple docstring'''
a__: List[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowercase)).to(lowercase)
a__: int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1)).to(lowercase)
# create init_image
a__: Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase)).to(lowercase)
a__: int = image.cpu().permute(0 , 2 , 3 , 1)[0]
a__: Optional[int] = Image.fromarray(np.uint8(lowercase)).convert('RGB').resize((2_56, 2_56))
# create mask
a__: Tuple = np.ones((64, 64) , dtype=np.float32)
a__: Optional[Any] = 0
if str(lowercase).startswith('mps'):
a__: str = torch.manual_seed(lowercase)
else:
a__: Dict = torch.Generator(device=lowercase).manual_seed(lowercase)
a__: Optional[int] = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Optional[Any] = 'cpu'
a__: List[Any] = self.get_dummy_components()
a__: Optional[Any] = self.pipeline_class(**lowercase)
a__: str = pipe.to(lowercase)
pipe.set_progress_bar_config(disable=lowercase)
a__: Optional[int] = pipe(**self.get_dummy_inputs(lowercase))
a__: List[str] = output.images
a__: int = pipe(
**self.get_dummy_inputs(lowercase) , return_dict=lowercase , )[0]
a__: Optional[Any] = image[0, -3:, -3:, -1]
a__: List[Any] = image_from_tuple[0, -3:, -3:, -1]
print(f'image.shape {image.shape}')
assert image.shape == (1, 64, 64, 3)
a__: str = np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase ):
def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy')
a__: int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
a__: Union[str, Any] = np.ones((7_68, 7_68) , dtype=np.float32)
a__: int = 0
a__: Optional[int] = 'a hat'
a__: int = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.float16)
pipe_prior.to(lowercase)
a__: Any = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.float16)
a__: Optional[Any] = pipeline.to(lowercase)
pipeline.set_progress_bar_config(disable=lowercase)
a__: Dict = torch.Generator(device='cpu').manual_seed(0)
a__ , a__: Optional[Any] = pipe_prior(
lowercase , generator=lowercase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
a__: List[str] = pipeline(
lowercase , image=lowercase , mask_image=lowercase , image_embeds=lowercase , negative_image_embeds=lowercase , generator=lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type='np' , )
a__: str = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowercase , lowercase)
| 290 | 0 |
'''simple docstring'''
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting times under Shortest Remaining Time First scheduling."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turn-around time is burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting and turn-around times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" )
    print("""Average turn around time =""" , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print('''Enter how many process you want to analyze''')
_lowerCAmelCase = int(input())
_lowerCAmelCase = [0] * no_of_processes
_lowerCAmelCase = [0] * no_of_processes
_lowerCAmelCase = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print('''Enter the arrival time and burst time for process:--''' + str(i + 1))
_lowerCAmelCase , _lowerCAmelCase = map(int, input().split())
_lowerCAmelCase = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
_lowerCAmelCase = burst_time
_lowerCAmelCase = no_of_processes
_lowerCAmelCase = waiting_time
_lowerCAmelCase = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
_lowerCAmelCase = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'''Process''',
'''BurstTime''',
'''ArrivalTime''',
'''WaitingTime''',
'''TurnAroundTime''',
],
)
# Printing the dataFrame
pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
print(fcfs)
| 184 |
'''simple docstring'''
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered ways to reach `target` using items from `array` (naive recursion)."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count, memoized on a dp_array of size target + 1."""

    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]
    ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Same count, iterative bottom-up dynamic programming."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
n = 3
target = 5
array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
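# For these inputs the count is 9: with order mattering, f(5) = f(4) + f(3) + f(0) = 5 + 3 + 1.
assert combination_sum_iv(n, array, target) == 9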
| 184 | 1 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    """Stand-in for hitting a real CUDA OOM."""
    raise RuntimeError('CUDA out of memory.' )


class ModelForTest(nn.Module):
    def __init__( self ) -> None:
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )

    def forward( self , x ):
        return self.linear2(self.batchnorm(self.linear1(x ) ) )


class MemoryTest(unittest.TestCase):
def _UpperCAmelCase ( self ) -> Tuple:
batch_sizes = []

@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(batch_size ):
    nonlocal batch_sizes
    batch_sizes.append(batch_size )
    if batch_size != 8:
        raise_fake_out_of_memory()

mock_training_loop_function()
self.assertListEqual(batch_sizes , [1_2_8, 6_4, 3_2, 1_6, 8] )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
batch_sizes = []

@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(batch_size , arg1 ):
    nonlocal batch_sizes
    batch_sizes.append(batch_size )
    if batch_size != 8:
        raise_fake_out_of_memory()
    return batch_size, arg1

bs, arg1 = mock_training_loop_function('hello' )
self.assertListEqual(batch_sizes , [1_2_8, 6_4, 3_2, 1_6, 8] )
self.assertListEqual([bs, arg1] , [8, 'hello'] )
def _UpperCAmelCase ( self ) -> Tuple:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(batch_size ):
    pass

with self.assertRaises(RuntimeError ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def _UpperCAmelCase ( self ) -> Any:
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(batch_size ):
    if batch_size > 0:
        raise_fake_out_of_memory()
    pass

with self.assertRaises(RuntimeError ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def _UpperCAmelCase ( self ) -> Tuple:
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(batch_size , arg1 , arg2 ):
    if batch_size != 8:
        raise_fake_out_of_memory()

with self.assertRaises(TypeError ) as cm:
    mock_training_loop_function(1_2_8 , 'hello' , 'world' )
self.assertIn('Batch size was passed into `f`' , cm.exception.args[0] )
self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' , cm.exception.args[0] )
def _UpperCAmelCase ( self ) -> Dict:
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(batch_size ):
    raise ValueError('Oops, we had an error!' )

with self.assertRaises(ValueError ) as cm:
mock_training_loop_function()
self.assertIn('Oops, we had an error!' , cm.exception.args[0] )
@require_cuda
def _UpperCAmelCase ( self ) -> Optional[Any]:
starting_memory = torch.cuda.memory_allocated()
model = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , starting_memory )
model = release_memory(model )
self.assertEqual(torch.cuda.memory_allocated() , starting_memory )
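# In real training code the decorator exercised above is used roughly like this
# (body illustrative):
#   @find_executable_batch_size(starting_batch_size=128)
#   def inner_training_loop(batch_size):
#       ...  # build dataloaders/model for `batch_size`; on a CUDA OOM the
#            # wrapper halves the batch size and re-invokes this function
#   inner_training_loop()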
| 77 |
"""simple docstring"""
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    """Thin wrapper over a deepspeed config that allows nested, dotted-key queries."""

    def __init__( self , config_file_or_dict ) -> None:
        if isinstance(config_file_or_dict , dict ):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict )
        elif os.path.exists(config_file_or_dict ):
            with io.open(config_file_or_dict , 'r' , encoding='utf-8' ) as f:
                config = json.load(f )
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict ).decode('utf-8' )
                config = json.loads(config_decoded )
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )

        self.config = config
        self.set_stage_and_offload()
    def set_stage_and_offload( self ) -> None:
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value('zero_optimization.stage' , -1 )

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(['cpu', 'nvme'] )
            offload_devices = set(
                [
                    self.get_value('zero_optimization.offload_optimizer.device' ),
                    self.get_value('zero_optimization.offload_param.device' ),
                ] )
            if len(offload_devices & offload_devices_valid ) > 0:
                self._offload = True
    def find_config_node( self , ds_key_long ):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split('.' )
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node )
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value( self , ds_key_long , default=None ):
        config, ds_key = self.find_config_node(ds_key_long )
        if config is None:
            return default
        return config.get(ds_key , default )
    def del_config_sub_tree( self , ds_key_long , must_exist=False ):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split('.' )
        for node in nodes:
            parent_config = config
            config = config.get(node )
            if config is None:
                if must_exist:
                    raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" )
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node )
    def is_true( self , ds_key_long ):
        value = self.get_value(ds_key_long )
        return False if value is None else bool(value )

    def is_false( self , ds_key_long ):
        value = self.get_value(ds_key_long )
        return False if value is None else not bool(value )

    def is_zero2( self ):
        return self._stage == 2

    def is_zero3( self ):
        return self._stage == 3

    def is_offload( self ):
        return self._offload
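# Usage sketch of the dotted-key helpers above (class and method names as
# reconstructed here; config dict illustrative):
#   cfg = HfDeepSpeedConfig({"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}})
#   cfg.get_value("zero_optimization.stage")  # -> 3
#   cfg.is_zero3(), cfg.is_offload()          # -> True, True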
class DeepSpeedEngineWrapper:
    """Wrapper over a deepspeed engine so that ``accelerator.backward(loss)`` just works."""

    def __init__( self , engine ) -> None:
        self.engine = engine

    def backward( self , loss , **kwargs ) -> None:
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss , **kwargs )

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.
class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    def __init__( self , optimizer ) -> None:
        super().__init__(optimizer , device_placement=False , scaler=None )
        self.__has_overflow__ = hasattr(self.optimizer , 'overflow' )

    def zero_grad( self , set_to_none=None ) -> None:
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step( self ) -> None:
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped( self ):
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    def __init__( self , scheduler , optimizers ) -> None:
        super().__init__(scheduler , optimizers )

    def step( self ) -> None:
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class DummyOptim:
    """Dummy optimizer that signals Accelerate to build the real one from the deepspeed config."""

    def __init__( self , params , lr=0.001 , weight_decay=0 , **kwargs ) -> None:
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    """Dummy scheduler that signals Accelerate to build the real one from the deepspeed config."""

    def __init__( self , optimizer , total_num_steps=None , warmup_num_steps=0 , **kwargs ) -> None:
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
| 77 | 1 |
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt `text` into a cipher/key pair: c = (ord(ch) + k) * k for a random k."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert encrypt: ord(ch) = (c - k**2) / k."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
c, k = Onepad().encrypt("Hello")
print(c, k)
print(Onepad().decrypt(c, k))
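# Round-trip property: decrypt inverts encrypt, since ((i + k) * k - k**2) / k == i.
assert Onepad.decrypt(*Onepad.encrypt("attack at dawn")) == "attack at dawn"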
| 353 |
import datasets
from .evaluate import evaluate
lowercase_ = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
lowercase_ = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
lowercase_ = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad ( datasets.Metric ):
"""simple docstring"""
def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
'predictions': {'id': datasets.Value('string' ), 'prediction_text': datasets.Value('string' )},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ),codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'],reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'],)
def _compute( self , predictions , references ):
    pred_dict = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
    dataset = [
{
'paragraphs': [
{
'qas': [
{
'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
'id': ref['id'],
}
for ref in references
]
}
]
}
]
    score = evaluate(dataset=dataset , predictions=pred_dict )
    return score
| 282 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    """This is the configuration class to store the configuration of a ConvBERT model."""

    model_type = "convbert"

    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , embedding_size=768 , head_ratio=2 , conv_kernel_size=9 , num_groups=1 , classifier_dropout=None , **kwargs , ):
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 108 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes )
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : str = None
if self.use_labels:
A_ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
A_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Union[str, Any]:
'''simple docstring'''
A_ : Dict = RegNetModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ : Any = model(_SCREAMING_SNAKE_CASE )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Union[str, Any]:
'''simple docstring'''
A_ : Union[str, Any] = self.num_labels
A_ : Dict = RegNetForImageClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ : int = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCamelCase ( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
snake_case = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
snake_case = (
{"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
if is_torch_available()
else {}
)
snake_case = False
snake_case = False
snake_case = False
snake_case = False
def setUp( self )->None:
    '''simple docstring'''
    self.model_tester = RegNetModelTester(self )
    self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )
def _snake_case ( self )->Dict:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _snake_case ( self )->Tuple:
'''simple docstring'''
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def _snake_case ( self )->Dict:
'''simple docstring'''
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def _snake_case ( self )->str:
'''simple docstring'''
pass
def _snake_case ( self )->List[Any]:
'''simple docstring'''
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : str = model_class(_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Any = [*signature.parameters.keys()]
A_ : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Union[str, Any] = model_class(config=_SCREAMING_SNAKE_CASE )
for name, module in model.named_modules():
if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def _snake_case ( self )->List[Any]:
'''simple docstring'''
def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
A_ : str = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
A_ : Tuple = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
A_ : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A_ : int = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
A_ : int = layer_type
A_ : List[Any] = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : str = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )->Dict:
'''simple docstring'''
A_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
@slow
def _snake_case ( self )->str:
'''simple docstring'''
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Dict = RegNetModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _snake_case ( self )->List[str]:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _snake_case ( self )->Tuple:
'''simple docstring'''
A_ : List[Any] = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = self.default_image_processor
A_ : Any = prepare_img()
A_ : Optional[Any] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
A_ : Union[str, Any] = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
A_ : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
A_ : Optional[int] = torch.tensor([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 186 | 0 |
"""simple docstring"""
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (positive integers whose only prime factors are 2, 3, 5)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"""{ugly_numbers(200) = }""")
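# The first ten ugly numbers are 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so:
assert ugly_numbers(10) == 12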
| 150 |
"""simple docstring"""
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide `number_of_bytes` into `partitions` contiguous, 1-indexed byte ranges."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!" )
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!" )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
    return allocation_list
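# Example (illustrative): 16647 bytes split four ways; the last partition
# absorbs the remainder.
#   allocation_num(16_647, 4)
#   # -> ['1-4161', '4162-8322', '8323-12483', '12484-16647']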
if __name__ == "__main__":
import doctest
doctest.testmod()
| 150 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list ):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:] )
class StableDiffusionLatentUpscalePipelineFastTests ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''height''',
'''width''',
'''cross_attention_kwargs''',
'''negative_prompt_embeds''',
'''prompt_embeds''',
}
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''num_images_per_prompt'''}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowerCamelCase: Optional[int] = frozenset([] )
_lowerCamelCase: Tuple = True
@property
    def dummy_image(self ):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)
        image = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(torch_device )
        return image
    def get_dummy_components(self ):
        torch.manual_seed(0 )
        model = UNet2DConditionModel(
            act_fn='gelu' ,attention_head_dim=8 ,norm_num_groups=None ,block_out_channels=[32, 32, 64, 64] ,time_cond_proj_dim=160 ,conv_in_kernel=1 ,conv_out_kernel=1 ,cross_attention_dim=32 ,down_block_types=(
            'KDownBlock2D',
            'KCrossAttnDownBlock2D',
            'KCrossAttnDownBlock2D',
            'KCrossAttnDownBlock2D',
        ) ,in_channels=8 ,mid_block_type=None ,only_cross_attention=False ,out_channels=5 ,resnet_time_scale_shift='scale_shift' ,time_embedding_type='fourier' ,timestep_post_act='gelu' ,up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D') ,)
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=[
            'DownEncoderBlock2D',
            'DownEncoderBlock2D',
            'DownEncoderBlock2D',
            'DownEncoderBlock2D',
        ] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,)
        scheduler = EulerDiscreteScheduler(prediction_type='sample' )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act='quick_gelu' ,projection_dim=512 ,)
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': model.eval(),
            'vae': vae.eval(),
            'scheduler': scheduler,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components
    def get_dummy_inputs(self ,device ,seed=0 ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': self.dummy_image.cpu(),
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def test_inference(self ):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape ,(1, 256, 256, 3) )
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff ,1e-3 )
    def test_attention_slicing_forward_pass(self ):
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
    def test_cpu_offload_forward_pass(self ):
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
    def test_dict_tuple_outputs_equivalent(self ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
    def test_inference_batch_single_identical(self ):
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
    def test_pt_np_pil_outputs_equivalent(self ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
    def test_save_load_local(self ):
super().test_save_load_local(expected_max_difference=3e-3 )
    def test_save_load_optional_components(self ):
super().test_save_load_optional_components(expected_max_difference=3e-3 )
    def test_karras_schedulers_shape(self ):
        skip_schedulers = [
'DDIMScheduler',
'DDPMScheduler',
'PNDMScheduler',
'HeunDiscreteScheduler',
'EulerAncestralDiscreteScheduler',
'KDPM2DiscreteScheduler',
'KDPM2AncestralDiscreteScheduler',
'DPMSolverSDEScheduler',
]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        inputs['''num_inference_steps'''] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # schedulers without a sigma schedule are not supported here
                continue
            scheduler_cls = getattr(diffusers ,scheduler_enum.name )
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config )
            outputs.append(pipe(**inputs )[0] )
        assert check_same_shape(outputs )
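    # The swap pattern exercised by the loop above, in isolation (a sketch; any
    # Karras-style scheduler class from `diffusers` can stand in here):
    #
    #     from diffusers import HeunDiscreteScheduler
    #     pipe.scheduler = HeunDiscreteScheduler.from_config(pipe.scheduler.config)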
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase ):
'''simple docstring'''
    def tearDown(self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_latent_upscaler_fp16(self ):
        generator = torch.manual_seed(33 )
        pipe = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' ,torch_dtype=torch.float16 )
        pipe.to('cuda' )
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler' ,torch_dtype=torch.float16 )
        upscaler.to('cuda' )
        prompt = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'
        low_res_latents = pipe(prompt ,generator=generator ,output_type='latent' ).images
        image = upscaler(
            prompt=prompt ,image=low_res_latents ,num_inference_steps=20 ,guidance_scale=0 ,generator=generator ,output_type='np' ,).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy' )
        assert np.abs((expected_image - image).mean() ) < 5e-2
    def test_latent_upscaler_fp16_image(self ):
        generator = torch.manual_seed(33 )
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler' ,torch_dtype=torch.float16 )
        upscaler.to('cuda' )
        prompt = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
        image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png' )
        image = upscaler(
            prompt=prompt ,image=image ,num_inference_steps=20 ,guidance_scale=0 ,generator=generator ,output_type='np' ,).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy' )
        assert np.abs((expected_image - image).max() ) < 5e-2
| 74 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : int = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='gpt_bigcode'
__a =['past_key_values']
__a ={
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__(self , vocab_size=50257 , n_positions=1024 , n_embd=768 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , attention_softmax_in_fp32=True , scale_attention_softmax_in_fp32=True , multi_query=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
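    # Usage sketch (in `transformers` this class is `GPTBigCodeConfig`; the values
    # below are illustrative):
    #
    #     config = GPTBigCodeConfig(n_embd=256, n_layer=4, n_head=4)
    #     config.hidden_size  # -> 256, resolved through `attribute_map`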
| 63 | 0 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = "\nimport os\n"
IMPORT_IN_FUNCTION = "\ndef foo():\n    import os\n    return False\n"
DEEPLY_NESTED_IMPORT = "\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n"
TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"
TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n"
MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n"
EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n"
GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n"
MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n"
MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n"
CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    """`get_imports` should report only the top-level, non-optional imports."""
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)
    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
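# Standalone sketch of the behavior under test (hypothetical file name): imports
# guarded by try/except are treated as optional and skipped by `get_imports`.
#
#     with open("demo.py", "w") as f:
#         f.write(TOP_LEVEL_TRY_IMPORT)
#     get_imports("demo.py")  # -> ["os"]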
| 354 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '▁'
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self ):
        super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ (self : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = "<s>"
UpperCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def lowercase_ (self : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "[MASK]" )
self.assertEqual(len(__UpperCAmelCase ) , 1_0_0_4 )
def lowercase_ (self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def lowercase_ (self : Union[str, Any] ) -> Any:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = self.get_rust_tokenizer()
UpperCAmelCase__ = "I was born in 92000, and this is falsé."
UpperCAmelCase__ = tokenizer.tokenize(__UpperCAmelCase )
UpperCAmelCase__ = rust_tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase__ = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
UpperCAmelCase__ = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase__ = self.get_rust_tokenizer()
UpperCAmelCase__ = tokenizer.encode(__UpperCAmelCase )
UpperCAmelCase__ = rust_tokenizer.encode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowercase_ (self : str ) -> Tuple:
"""simple docstring"""
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB , keep_accents=True )
UpperCAmelCase__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
UpperCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self ) -> BigBirdTokenizer:
        """Full pretrained tokenizer used by the slow integration tests below."""
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
@slow
def lowercase_ (self : str ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = "Hello World!"
UpperCAmelCase__ = [6_5, 1_8_5_3_6, 2_2_6_0, 1_0_1, 6_6]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@slow
def lowercase_ (self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
# fmt: off
UpperCAmelCase__ = [6_5, 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, 6_6] # noqa: E231
# fmt: on
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@require_torch
@slow
def lowercase_ (self : List[str] ) -> int:
"""simple docstring"""
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
UpperCAmelCase__ = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
UpperCAmelCase__ = " ".join(__UpperCAmelCase )
UpperCAmelCase__ = self.big_tokenizer.encode_plus(__UpperCAmelCase , return_tensors="pt" , return_token_type_ids=__UpperCAmelCase )
UpperCAmelCase__ = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=__UpperCAmelCase )
UpperCAmelCase__ = BigBirdConfig(attention_type="original_full" )
UpperCAmelCase__ = BigBirdModel(__UpperCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__UpperCAmelCase )
model(**__UpperCAmelCase )
@slow
def lowercase_ (self : str ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
UpperCAmelCase__ = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )
self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
@slow
def lowercase_ (self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = {"input_ids": [[6_5, 3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4, 6_6], [6_5, 4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [6_5, 4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
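    # Round-trip sketch consistent with the integration tests above (hub access
    # assumed):
    #
    #     tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    #     tok("Hello World!").input_ids           # -> [65, 18536, 2260, 101, 66]
    #     tok.decode([65, 18536, 2260, 101, 66])  # special tokens kept in the output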
| 143 | 0 |
import datasets
from .evaluate import evaluate
A : Dict = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"
A : Any = "\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
A : str = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the CUAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\n 'aupr': Area Under the Precision-Recall curve\n 'prec_at_80_recall': Precision at 80% recall\n 'prec_at_90_recall': Precision at 90% recall\nExamples:\n >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric ):
"""simple docstring"""
    def _info(self ):
        '''Declare the metric's input schema and reference URLs.'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
    def _compute(self , predictions , references ):
        '''Compute CUAD scores from predictions and references.'''
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
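    # End-to-end sketch mirroring the docstring example above (requires `datasets`):
    #
    #     cuad_metric = datasets.load_metric("cuad")
    #     results = cuad_metric.compute(predictions=predictions, references=references)
    #     # -> {'exact_match': ..., 'f1': ..., 'aupr': ..., 'prec_at_80_recall': ..., 'prec_at_90_recall': ...}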
| 184 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
"""simple docstring"""
def __init__( self : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any]=14 , __lowerCamelCase : str=7 , __lowerCamelCase : Tuple=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Tuple=True , __lowerCamelCase : List[Any]=99 , __lowerCamelCase : Optional[Any]=32 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : List[str]=4 , __lowerCamelCase : List[Any]=37 , __lowerCamelCase : str="gelu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Any=512 , __lowerCamelCase : Dict=0.0_2 , ):
'''simple docstring'''
lowerCamelCase__ : int = parent
lowerCamelCase__ : Any = batch_size
lowerCamelCase__ : Tuple = seq_length
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : List[Any] = use_input_mask
lowerCamelCase__ : Optional[Any] = use_token_type_ids
lowerCamelCase__ : Optional[Any] = use_labels
lowerCamelCase__ : List[Any] = vocab_size
lowerCamelCase__ : Any = hidden_size
lowerCamelCase__ : int = rotary_dim
lowerCamelCase__ : List[str] = num_hidden_layers
lowerCamelCase__ : int = num_attention_heads
lowerCamelCase__ : Optional[int] = intermediate_size
lowerCamelCase__ : Tuple = hidden_act
lowerCamelCase__ : Optional[int] = hidden_dropout_prob
lowerCamelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase__ : str = max_position_embeddings
lowerCamelCase__ : Tuple = initializer_range
lowerCamelCase__ : Any = None
lowerCamelCase__ : Optional[Any] = vocab_size - 1
lowerCamelCase__ : List[Any] = vocab_size - 1
lowerCamelCase__ : str = vocab_size - 1
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : List[str] = None
if self.use_input_mask:
lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Dict = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=__lowerCamelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowerCAmelCase ( self : str ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = config_and_inputs
lowerCamelCase__ : Tuple = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : str = 20
lowerCamelCase__ : Tuple = model_class_name(__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = model.init_cache(input_ids.shape[0] , __lowerCamelCase )
lowerCamelCase__ : List[str] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="i4" )
lowerCamelCase__ : str = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowerCamelCase__ : Tuple = model(
input_ids[:, :-1] , attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase , position_ids=__lowerCamelCase , )
lowerCamelCase__ : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
lowerCamelCase__ : str = model(
input_ids[:, -1:] , attention_mask=__lowerCamelCase , past_key_values=outputs_cache.past_key_values , position_ids=__lowerCamelCase , )
lowerCamelCase__ : List[Any] = model(__lowerCamelCase )
lowerCamelCase__ : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : str ):
'''simple docstring'''
lowerCamelCase__ : int = 20
lowerCamelCase__ : int = model_class_name(__lowerCamelCase )
lowerCamelCase__ : Dict = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
lowerCamelCase__ : Dict = model.init_cache(input_ids.shape[0] , __lowerCamelCase )
lowerCamelCase__ : Any = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowerCamelCase__ : List[Any] = model(
input_ids[:, :-1] , attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase , position_ids=__lowerCamelCase , )
lowerCamelCase__ : Union[str, Any] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
lowerCamelCase__ : Any = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=__lowerCamelCase , position_ids=__lowerCamelCase , )
lowerCamelCase__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase )
lowerCamelCase__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin , FlaxGenerationTesterMixin , unittest.TestCase):
"""simple docstring"""
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp(self ):
        self.model_tester = FlaxGPTJModelTester(self )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
@tooslow
def lowerCAmelCase ( self : str ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = GPTaTokenizer.from_pretrained("gpt2" , pad_token="<|endoftext|>" , padding_side="left" )
lowerCamelCase__ : List[str] = tokenizer(["Hello this is a long string", "Hey"] , return_tensors="np" , padding=__lowerCamelCase , truncation=__lowerCamelCase )
lowerCamelCase__ : Any = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B" )
lowerCamelCase__ : Optional[Any] = False
lowerCamelCase__ : str = model.config.eos_token_id
lowerCamelCase__ : int = jax.jit(model.generate )
lowerCamelCase__ : str = jit_generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , pad_token_id=tokenizer.pad_token_id ).sequences
lowerCamelCase__ : Tuple = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
lowerCamelCase__ : Any = [
"Hello this is a long string of text.\n\nI'm trying to get the text of the",
"Hey, I'm a little late to the party. I'm going to",
]
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
@is_pt_flax_cross_test
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowerCamelCase__ : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : int = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCamelCase__ : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCamelCase__ : int = getattr(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = pt_inputs["input_ids"].shape
lowerCamelCase__ : List[str] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__lowerCamelCase ):
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : List[str] = 1
lowerCamelCase__ : Optional[Any] = 0
lowerCamelCase__ : Any = 1
lowerCamelCase__ : List[Any] = pt_model_class(__lowerCamelCase ).eval()
lowerCamelCase__ : List[str] = model_class(__lowerCamelCase , dtype=jnp.floataa )
lowerCamelCase__ : Optional[int] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __lowerCamelCase )
lowerCamelCase__ : Optional[int] = fx_state
with torch.no_grad():
lowerCamelCase__ : List[Any] = pt_model(**__lowerCamelCase ).to_tuple()
lowerCamelCase__ : Union[str, Any] = fx_model(**__lowerCamelCase ).to_tuple()
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(__lowerCamelCase , __lowerCamelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__lowerCamelCase )
lowerCamelCase__ : List[Any] = model_class.from_pretrained(__lowerCamelCase , from_pt=__lowerCamelCase )
lowerCamelCase__ : int = fx_model_loaded(**__lowerCamelCase ).to_tuple()
self.assertEqual(
len(__lowerCamelCase ) , len(__lowerCamelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(__lowerCamelCase , __lowerCamelCase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def lowerCAmelCase ( self : str ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowerCamelCase__ : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCamelCase__ : Union[str, Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCamelCase__ : List[Any] = getattr(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : Optional[Any] = pt_model_class(__lowerCamelCase ).eval()
lowerCamelCase__ : Tuple = model_class(__lowerCamelCase , dtype=jnp.floataa )
lowerCamelCase__ : Optional[Any] = load_flax_weights_in_pytorch_model(__lowerCamelCase , fx_model.params )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = pt_inputs["input_ids"].shape
lowerCamelCase__ : Optional[int] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__lowerCamelCase ):
lowerCamelCase__ : Any = 0
lowerCamelCase__ : Any = 1
lowerCamelCase__ : Union[str, Any] = 0
lowerCamelCase__ : str = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
lowerCamelCase__ : Dict = pt_model(**__lowerCamelCase ).to_tuple()
lowerCamelCase__ : List[str] = fx_model(**__lowerCamelCase ).to_tuple()
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(__lowerCamelCase , __lowerCamelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__lowerCamelCase )
lowerCamelCase__ : Any = pt_model_class.from_pretrained(__lowerCamelCase , from_flax=__lowerCamelCase )
with torch.no_grad():
lowerCamelCase__ : Optional[int] = pt_model_loaded(**__lowerCamelCase ).to_tuple()
self.assertEqual(
len(__lowerCamelCase ) , len(__lowerCamelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(__lowerCamelCase , __lowerCamelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCamelCase__ : Union[str, Any] = model_class_name.from_pretrained("EleutherAI/gpt-j-6B" )
lowerCamelCase__ : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__lowerCamelCase )
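    # The cross-framework checks above reduce to two conversion primitives (sketch):
    #
    #     fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)  # PT -> Flax
    #     pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)        # Flax -> PT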
| 184 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase_ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
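# Lazy-import behavior in a nutshell (illustrative; the module path is assumed):
#
#     from transformers.models import blip   # cheap: no heavy frameworks imported yet
#     blip.BlipProcessor                     # first attribute access triggers the real import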
| 14 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class a_ ( a_ ):
'''simple docstring'''
__a: str = ['''vqvae''']
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Tuple:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_ , mel=lowercase_ , vqvae=lowercase_ )
    def get_default_steps(self ) -> int:
        '''Default number of inference steps: 50 for DDIM, 1000 for DDPM.'''
        return 50 if isinstance(self.scheduler , DDIMScheduler ) else 1000
@torch.no_grad()
def __call__( self , lowercase_ = 1 , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
'''simple docstring'''
lowerCAmelCase_ = steps or self.get_default_steps()
self.scheduler.set_timesteps(lowercase_ )
lowerCAmelCase_ = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
lowerCAmelCase_ = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
lowerCAmelCase_ = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=lowercase_ , device=self.device , )
lowerCAmelCase_ = noise
lowerCAmelCase_ = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(lowercase_ , lowercase_ )
lowerCAmelCase_ = self.mel.audio_slice_to_image(lowercase_ )
lowerCAmelCase_ = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
lowerCAmelCase_ = (input_image / 2_5_5) * 2 - 1
lowerCAmelCase_ = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
lowerCAmelCase_ = self.vqvae.encode(torch.unsqueeze(lowercase_ , 0 ) ).latent_dist.sample(
generator=lowercase_ )[0]
lowerCAmelCase_ = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , self.scheduler.timesteps[start_step - 1] )
lowerCAmelCase_ = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
lowerCAmelCase_ = int(mask_start_secs * pixels_per_second )
lowerCAmelCase_ = int(mask_end_secs * pixels_per_second )
lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , lowercase_ ):
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ , lowercase_ )['sample']
else:
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ )['sample']
if isinstance(self.scheduler , lowercase_ ):
lowerCAmelCase_ = self.scheduler.step(
model_output=lowercase_ , timestep=lowercase_ , sample=lowercase_ , eta=lowercase_ , generator=lowercase_ , )['prev_sample']
else:
lowerCAmelCase_ = self.scheduler.step(
model_output=lowercase_ , timestep=lowercase_ , sample=lowercase_ , generator=lowercase_ , )['prev_sample']
if mask is not None:
if mask_start > 0:
lowerCAmelCase_ = mask[:, step, :, :mask_start]
if mask_end > 0:
lowerCAmelCase_ = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
lowerCAmelCase_ = 1 / self.vqvae.config.scaling_factor * images
lowerCAmelCase_ = self.vqvae.decode(lowercase_ )['sample']
lowerCAmelCase_ = (images / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase_ = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
lowerCAmelCase_ = (images * 2_5_5).round().astype('uint8' )
lowerCAmelCase_ = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(lowercase_ , mode='RGB' ).convert('L' ) for _ in images) )
lowerCAmelCase_ = [self.mel.image_to_audio(lowercase_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(lowercase_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowercase_ ) )
@torch.no_grad()
def _lowercase ( self , lowercase_ , lowercase_ = 5_0 ) -> np.ndarray:
'''simple docstring'''
assert isinstance(self.scheduler , lowercase_ )
self.scheduler.set_timesteps(lowercase_ )
lowerCAmelCase_ = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
lowerCAmelCase_ = (sample / 2_5_5) * 2 - 1
lowerCAmelCase_ = torch.Tensor(lowercase_ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
lowerCAmelCase_ = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
lowerCAmelCase_ = self.scheduler.alphas_cumprod[t]
lowerCAmelCase_ = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
lowerCAmelCase_ = 1 - alpha_prod_t
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ )['sample']
lowerCAmelCase_ = (1 - alpha_prod_t_prev) ** 0.5 * model_output
lowerCAmelCase_ = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
lowerCAmelCase_ = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
    @staticmethod
    def slerp(x0: torch.Tensor , x1: torch.Tensor , alpha: float ) -> torch.Tensor:
        '''Spherical linear interpolation between two tensors by factor alpha.'''
        theta = acos(torch.dot(torch.flatten(x0 ) , torch.flatten(x1 ) ) / torch.norm(x0 ) / torch.norm(x1 ) )
        return sin((1 - alpha) * theta ) * x0 / sin(theta ) + sin(alpha * theta ) * x1 / sin(theta )
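    # Interpolating halfway between two noise tensors of the same shape (a sketch;
    # the pipeline class name is obfuscated above, so it is referenced generically):
    #
    #     x_mid = pipeline.slerp(noise_a, noise_b, 0.5)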
| 14 | 1 |
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
'''simple docstring'''
def __init__( self : List[Any] , A : Optional[Any] , A : int=14 , A : Tuple=7 , A : str=True , A : Tuple=True , A : Optional[int]=True , A : Tuple=True , A : List[str]=True , A : List[str]=99 , A : Optional[Any]=32 , A : str=5 , A : Tuple=4 , A : str=37 , A : Any="gelu" , A : Any=0.1 , A : str=0.1 , A : int=512 , A : List[str]=16 , A : List[Any]=2 , A : List[str]=0.02 , A : List[Any]=3 , A : Any=4 , A : int=None , ):
_UpperCAmelCase : Optional[int] = parent
_UpperCAmelCase : Union[str, Any] = batch_size
_UpperCAmelCase : List[str] = seq_length
_UpperCAmelCase : Any = is_training
_UpperCAmelCase : Union[str, Any] = use_token_type_ids
_UpperCAmelCase : Optional[int] = use_input_mask
_UpperCAmelCase : Dict = use_labels
_UpperCAmelCase : Optional[Any] = use_mc_token_ids
_UpperCAmelCase : Any = vocab_size
_UpperCAmelCase : Union[str, Any] = hidden_size
_UpperCAmelCase : int = num_hidden_layers
_UpperCAmelCase : Optional[int] = num_attention_heads
_UpperCAmelCase : Tuple = intermediate_size
_UpperCAmelCase : Dict = hidden_act
_UpperCAmelCase : List[str] = hidden_dropout_prob
_UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
_UpperCAmelCase : Union[str, Any] = max_position_embeddings
_UpperCAmelCase : Dict = type_vocab_size
_UpperCAmelCase : Tuple = type_sequence_label_size
_UpperCAmelCase : List[Any] = initializer_range
_UpperCAmelCase : Union[str, Any] = num_labels
_UpperCAmelCase : Optional[int] = num_choices
_UpperCAmelCase : Dict = scope
_UpperCAmelCase : Optional[Any] = self.vocab_size - 1
def _A ( self : List[str] ):
_UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : str = None
if self.use_input_mask:
_UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : int = None
if self.use_token_type_ids:
_UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase : str = None
if self.use_mc_token_ids:
_UpperCAmelCase : Any = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : Dict = None
_UpperCAmelCase : Optional[int] = None
if self.use_labels:
_UpperCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase : int = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase : List[Any] = self.get_config()
_UpperCAmelCase : Optional[int] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def get_config(self ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def _A ( self : int , A : Dict , A : Optional[int] , A : int , A : Optional[int] , A : str , *A : Any ):
_UpperCAmelCase : str = CTRLModel(config=A )
model.to(A )
model.eval()
model(A , token_type_ids=A , head_mask=A )
model(A , token_type_ids=A )
_UpperCAmelCase : Union[str, Any] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def _A ( self : Dict , A : Any , A : int , A : List[str] , A : Dict , A : int , *A : List[Any] ):
_UpperCAmelCase : Dict = CTRLLMHeadModel(A )
model.to(A )
model.eval()
_UpperCAmelCase : Optional[Any] = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A ( self : Any ):
_UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) : Dict = config_and_inputs
_UpperCAmelCase : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
return config, inputs_dict
def _A ( self : Optional[Any] , A : List[str] , A : List[Any] , A : List[Any] , A : Union[str, Any] , *A : Optional[int] ):
_UpperCAmelCase : str = self.num_labels
_UpperCAmelCase : Union[str, Any] = CTRLForSequenceClassification(A )
model.to(A )
model.eval()
_UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : Union[str, Any] = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class lowerCamelCase_ (snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase: Dict = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
__UpperCamelCase: Optional[int] = (CTRLLMHeadModel,) if is_torch_available() else ()
__UpperCamelCase: Dict = (
{
"feature-extraction": CTRLModel,
"text-classification": CTRLForSequenceClassification,
"text-generation": CTRLLMHeadModel,
"zero-shot": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase: str = True
__UpperCamelCase: int = False
__UpperCamelCase: Any = False
def _A ( self : List[str] , A : int , A : Union[str, Any] , A : Tuple , A : int , A : Tuple ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
    def setUp(self ):
        self.model_tester = CTRLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=CTRLConfig , n_embd=37 )
    def tearDown(self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def _A ( self : Optional[int] ):
self.config_tester.run_common_tests()
def _A ( self : Dict ):
_UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*A )
def _A ( self : Tuple ):
_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*A )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _A ( self : Optional[Any] ):
pass
@slow
def _A ( self : List[Any] ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Optional[int] = CTRLModel.from_pretrained(A )
self.assertIsNotNone(A )
@unittest.skip("The model doesn\'t support left padding" ) # and it's not used enough to be worth fixing :)
def _A ( self : Tuple ):
pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase ):
'''simple docstring'''
    def tearDown(self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
    def test_lm_generate_ctrl(self ):
        model = CTRLLMHeadModel.from_pretrained("ctrl" )
        model.to(torch_device )
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]] , dtype=torch.long , device=torch_device )  # Legal the president is
        expected_output_ids = [
11859,
0,
1611,
8,
5,
150,
26449,
2,
19,
348,
469,
3,
2595,
48,
20740,
246533,
246533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
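        # Greedy decoding sketch mirroring this test (checkpoint download required):
        #
        #     model = CTRLLMHeadModel.from_pretrained("ctrl")
        #     prompt = torch.tensor([[11859, 0, 1611, 8]])  # "Legal the president is"
        #     model.generate(prompt, do_sample=False)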
| 31 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_lowerCamelCase : int = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
    def get_model_optimizer(self , resolution=32 ):
        '''Create a fresh UNet and SGD optimizer with a fixed seed.'''
        set_seed(0 )
        model = UNet2DModel(sample_size=resolution , in_channels=3 , out_channels=3 )
        optimizer = torch.optim.SGD(model.parameters() , lr=0.0001 )
        return model, optimizer
@slow
    def test_training_step_equality(self ):
        device = 'cpu'  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=True , )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=True , )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0 )
        clean_images = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(device ) for _ in range(4 )]
        noise = [torch.randn((4, 3, 32, 32) ).to(device ) for _ in range(4 )]
        timesteps = [torch.randint(0 , 1_000 , (4,) ).long().to(device ) for _ in range(4 )]
        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddpm_noise_pred = model(ddpm_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddim_noise_pred = model(ddim_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # both schedulers share the same alphas_cumprod, so the noised batches (and
        # hence the last noise predictions) must agree between DDPM and DDIM
        self.assertTrue(torch.allclose(ddpm_noisy_images , ddim_noisy_images , atol=1E-5 ) )
        self.assertTrue(torch.allclose(ddpm_noise_pred , ddim_noise_pred , atol=1E-5 ) )
| 282 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 353 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__A = R'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated
by [`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`):
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`):
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*):
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`].
use_dummy_dataset (`bool`, *optional*, defaults to `False`):
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True
    def __init__(self, vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None, pad_token_id=None, eos_token_id=None, decoder_start_token_id=None, title_sep=" / ", doc_sep=" // ", n_docs=5, max_combined_length=300, retrieval_vector_size=768, retrieval_batch_size=8, dataset="wiki_dpr", dataset_split="train", index_name="compressed", index_path=None, passages_path=None, use_dummy_dataset=False, reduce_loss=False, label_smoothing=0.0, do_deduplication=True, exclude_bos_score=False, do_marginalize=False, output_retrieved=False, use_cache=True, forced_eos_token_id=None, **kwargs):
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")
        from ..auto.configuration_auto import AutoConfig
        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)
    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
 | 64 | 0 |
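# Editor's note (usage sketch, separate from the file above; the checkpoint names are
# well-known examples, not a prescription): a RagConfig is composed from a
# question-encoder config and a generator config.
from transformers import AutoConfig, RagConfig
question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator_config = AutoConfig.from_pretrained("facebook/bart-large")
rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config, generator_config, n_docs=5
)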
"""simple docstring"""
from __future__ import annotations
def lowerCAmelCase__ ( _UpperCamelCase : int ) -> list[int]:
"""simple docstring"""
snake_case = [True] * limit
snake_case = False
snake_case = False
snake_case = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
snake_case = i * 2
while index < limit:
snake_case = False
snake_case = index + i
snake_case = [2]
for i in range(3 , _UpperCamelCase , 2 ):
if is_prime[i]:
primes.append(_UpperCamelCase )
return primes
def lowerCAmelCase__ ( _UpperCamelCase : int = 1_0_0_0_0_0_0 ) -> int:
"""simple docstring"""
snake_case = prime_sieve(_UpperCamelCase )
snake_case = 0
snake_case = 0
for i in range(len(_UpperCamelCase ) ):
for j in range(i + length , len(_UpperCamelCase ) ):
snake_case = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
snake_case = j - i
snake_case = sol
return largest
if __name__ == "__main__":
print(f"""{solution() = }""")
| 150 | """simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
SCREAMING_SNAKE_CASE__ = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
SCREAMING_SNAKE_CASE__ = {
"allenai/led-base-16384": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from utf-8 bytes to printable unicode stand-ins used by byte-level BPE."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
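# Editor's note (illustrative): `bytes_to_unicode` gives every byte a printable stand-in so
# byte-level BPE never has to merge raw whitespace/control bytes, and `get_pairs` lists the
# candidate merges, e.g. get_pairs(("l", "o", "w")) -> {("l", "o"), ("o", "w")}.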
class LEDTokenizer(PreTrainedTokenizer):
    """Constructs a LED tokenizer, derived from the GPT-2 byte-level BPE tokenizer."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase="replace" , lowerCAmelCase="<s>" , lowerCAmelCase="</s>" , lowerCAmelCase="</s>" , lowerCAmelCase="<s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<pad>" , lowerCAmelCase="<mask>" , lowerCAmelCase=False , **lowerCAmelCase , ):
"""simple docstring"""
snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else bos_token
snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else eos_token
snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else sep_token
snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else cls_token
snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else unk_token
snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else mask_token
super().__init__(
errors=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , **lowerCAmelCase , )
with open(lowerCAmelCase , encoding='utf-8' ) as vocab_handle:
snake_case = json.load(lowerCAmelCase )
snake_case = {v: k for k, v in self.encoder.items()}
snake_case = errors # how to handle errors in decoding
snake_case = bytes_to_unicode()
snake_case = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase , encoding='utf-8' ) as merges_handle:
snake_case = merges_handle.read().split('\n' )[1:-1]
snake_case = [tuple(merge.split() ) for merge in bpe_merges]
snake_case = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) )
snake_case = {}
snake_case = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def snake_case ( self ):
"""simple docstring"""
return len(self.encoder )
def snake_case ( self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
snake_case = []
for token in re.findall(self.pat , lowerCAmelCase ):
snake_case = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase ).split(' ' ) )
return bpe_tokens
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
return self.encoder.get(lowerCAmelCase , self.encoder.get(self.unk_token ) )
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
return self.decoder.get(lowerCAmelCase )
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
snake_case = ''.join(lowerCAmelCase )
snake_case = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def snake_case ( self , lowerCAmelCase , lowerCAmelCase = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
snake_case = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase , ensure_ascii=lowerCAmelCase ) + '\n' )
index = 0
with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
index = token_index
writer.write(' '.join(lowerCAmelCase ) + '\n' )
index += 1
return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def snake_case ( self , lowerCAmelCase , lowerCAmelCase=False , **lowerCAmelCase ):
"""simple docstring"""
snake_case = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase ) > 0 and not text[0].isspace()):
snake_case = ' ' + text
return (text, kwargs)
def snake_case ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = PaddingStrategy.DO_NOT_PAD , lowerCAmelCase = None , lowerCAmelCase = None , ):
"""simple docstring"""
snake_case = super()._pad(
encoded_inputs=lowerCAmelCase , max_length=lowerCAmelCase , padding_strategy=lowerCAmelCase , pad_to_multiple_of=lowerCAmelCase , return_attention_mask=lowerCAmelCase , )
# Load from model defaults
if return_attention_mask is None:
snake_case = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
snake_case = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
snake_case = len(encoded_inputs['global_attention_mask'] ) != len(lowerCAmelCase )
if needs_to_be_padded:
snake_case = len(lowerCAmelCase ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
snake_case = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
snake_case = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
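# Editor's note on `_pad` above (illustrative): LED marks global attention with 1 and local
# attention with 0, so padded positions must get -1 rather than 0 — e.g. right-padding
# [1, 0, 0] to length 5 yields [1, 0, 0, -1, -1].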
| 150 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu", attention_head_dim=8, norm_num_groups=None, block_out_channels=[32, 32, 64, 64], time_cond_proj_dim=160, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32, down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ), in_channels=8, mid_block_type=None, only_cross_attention=False, out_channels=5, resnet_time_scale_shift="scale_shift", time_embedding_type="fourier", timestep_post_act="gelu", up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3, down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act="quick_gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)
    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)
    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)
    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)
    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # these schedulers are not supported by this pipeline
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)
        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)
        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")
        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images
        image = upscaler(
            prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0, generator=generator, output_type="np",
        ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2
    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")
        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )
        image = upscaler(
            prompt=prompt, image=image, num_inference_steps=20, guidance_scale=0, generator=generator, output_type="np",
        ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
| 131 |
def power(base: int, exponent: int) -> float:
    """Recursively compute base ** exponent for exponent >= 0."""
    return base * power(base, exponent - 1) if exponent else 1
if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
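# Editor's note (quick self-checks for the recursive power above, illustrative):
assert power(2, 10) == 1024
assert power(5, 0) == 1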
| 131 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()
    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 143 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
UpperCAmelCase__ = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)
        super().__init__(**kwargs)
    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(_lowerCamelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
_snake_case = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'''
f''' to this method that includes {self.model_input_names[0]}, but you provided'''
f''' {list(processed_features.keys() )}''' )
_snake_case = processed_features[self.model_input_names[0]]
_snake_case = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(_lowerCamelCase ) == 0:
if return_attention_mask:
_snake_case = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
_snake_case = required_input[0]
if isinstance(_lowerCamelCase , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
_snake_case = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(_lowerCamelCase ):
_snake_case = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(_lowerCamelCase ):
_snake_case = '''tf'''
elif is_torch_tensor(_lowerCamelCase ):
_snake_case = '''pt'''
elif isinstance(_lowerCamelCase , (int, float, list, tuple, np.ndarray) ):
_snake_case = '''np'''
else:
raise ValueError(
f'''type of {first_element} unknown: {type(_lowerCamelCase )}. '''
'''Should be one of a python, numpy, pytorch or tensorflow object.''' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
_snake_case = to_numpy(_lowerCamelCase )
else:
_snake_case = [to_numpy(_lowerCamelCase ) for v in value]
# Convert padding_strategy in PaddingStrategy
_snake_case = self._get_padding_strategies(padding=_lowerCamelCase , max_length=_lowerCamelCase )
_snake_case = processed_features[self.model_input_names[0]]
_snake_case = len(_lowerCamelCase )
if not all(len(_lowerCamelCase ) == batch_size for v in processed_features.values() ):
raise ValueError('''Some items in the output dictionary have a different batch size than others.''' )
_snake_case = []
for i in range(_lowerCamelCase ):
_snake_case = {k: v[i] for k, v in processed_features.items()}
# truncation
_snake_case = self._truncate(
_lowerCamelCase , max_length=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , truncation=_lowerCamelCase , )
truncated_inputs.append(_lowerCamelCase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
_snake_case = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
_snake_case = PaddingStrategy.MAX_LENGTH
_snake_case = {}
for i in range(_lowerCamelCase ):
# padding
_snake_case = self._pad(
truncated_inputs[i] , max_length=_lowerCamelCase , padding_strategy=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
for key, value in outputs.items():
if key not in batch_outputs:
_snake_case = []
if value.dtype is np.dtype(np.float64 ):
_snake_case = value.astype(np.float32 )
batch_outputs[key].append(_lowerCamelCase )
return BatchFeature(_lowerCamelCase , tensor_type=_lowerCamelCase )
    def _pad(self, processed_features: Union[Dict[str, np.ndarray], BatchFeature], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None):
_snake_case = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
_snake_case = len(_lowerCamelCase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
_snake_case = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
_snake_case = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(_lowerCamelCase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
_snake_case = np.ones(len(_lowerCamelCase ) , dtype=np.int32 )
if needs_to_be_padded:
_snake_case = max_length - len(_lowerCamelCase )
if self.padding_side == "right":
if return_attention_mask:
_snake_case = np.pad(
processed_features['''attention_mask'''] , (0, difference) )
_snake_case = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
_snake_case = np.pad(
_lowerCamelCase , _lowerCamelCase , '''constant''' , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
_snake_case = np.pad(
processed_features['''attention_mask'''] , (difference, 0) )
_snake_case = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
_snake_case = np.pad(
_lowerCamelCase , _lowerCamelCase , '''constant''' , constant_values=self.padding_value )
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return processed_features
    def _truncate(self, processed_features: Union[Dict[str, np.ndarray], BatchFeature], max_length: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, truncation: Optional[bool] = None):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''' )
_snake_case = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
_snake_case = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
_snake_case = len(_lowerCamelCase ) > max_length
if needs_to_be_truncated:
_snake_case = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
_snake_case = processed_features['''attention_mask'''][:max_length]
return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
# Get padding strategy
if padding is not False:
if padding is True:
_snake_case = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(_lowerCamelCase , _lowerCamelCase ):
_snake_case = PaddingStrategy(_lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
_snake_case = padding
else:
_snake_case = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'''
''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''' )
return padding_strategy
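# Editor's note (usage sketch; `ToyFeatureExtractor` is hypothetical, defined only to
# illustrate the padding path above — real models ship concrete subclasses with their own
# defaults, so treat the exact call pattern as an assumption):
import numpy as np
class ToyFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values"]
toy_extractor = ToyFeatureExtractor(feature_size=1, sampling_rate=16_000, padding_value=0.0)
features = BatchFeature({"input_values": [np.zeros(3, dtype=np.float32), np.zeros(5, dtype=np.float32)]})
padded = toy_extractor.pad(features, padding=True, return_attention_mask=True, return_tensors="np")
# padded["input_values"] now has shape (2, 5); padded["attention_mask"] marks the real frames.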
| 369 |
"""simple docstring"""
def _UpperCAmelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple ) -> Union[str, Any]:
# Return True if there is node that has not iterated.
_snake_case = [False] * len(__lowerCamelCase )
_snake_case = []
queue.append(__lowerCamelCase )
_snake_case = True
while queue:
_snake_case = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__lowerCamelCase )
_snake_case = True
_snake_case = u
return visited[t]
def _UpperCAmelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict ) -> Dict:
# This array is filled by BFS and to store path
_snake_case = [-1] * (len(__lowerCamelCase ))
_snake_case = 0
while bfs(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
_snake_case = float('''Inf''' )
_snake_case = sink
while s != source:
# Find the minimum value in select path
_snake_case = min(__lowerCamelCase , graph[parent[s]][s] )
_snake_case = parent[s]
max_flow += path_flow
_snake_case = sink
while v != source:
_snake_case = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_snake_case = parent[v]
return max_flow
UpperCAmelCase__ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
UpperCAmelCase__ , UpperCAmelCase__ = 0, 5
print(ford_fulkerson(graph, source, sink))
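# Editor's note (illustrative): the adjacency matrix above is the classic CLRS flow
# network, and the printed maximum flow from node 0 to node 5 is 23. Beware that
# `ford_fulkerson` rewrites `graph` into its residual form in place, so pass a deep copy
# (e.g. `copy.deepcopy(graph)`) whenever the original capacities are still needed.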
| 40 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 14 |
def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan numbers C(0) .. C(upper_limit) via dynamic programming."""
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")
    import doctest
    doctest.testmod()
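# Editor's note (illustrative cross-check, assuming `catalan_numbers` from the snippet
# above is in scope): the DP table agrees with the closed form C(n) = binom(2n, n) / (n + 1).
import math
assert catalan_numbers(10)[10] == math.comb(20, 10) // 11  # C(10) = 16796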
| 14 | 1 |
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert `value` between metric length units, named either in full or by symbol."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
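# Editor's note (quick checks for the converter above, illustrative):
import math
assert math.isclose(length_conversion(1, "meter", "kilometer"), 0.001)
assert length_conversion(1, "kilometer", "meter") == 1_000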
| 366 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__: List[Any] = logging.get_logger(__name__)
a__: Optional[Any] = {
'microsoft/unispeech-large-1500h-cv': (
'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"
def __init__( self,__lowerCamelCase=32,__lowerCamelCase=768,__lowerCamelCase=12,__lowerCamelCase=12,__lowerCamelCase=3072,__lowerCamelCase="gelu",__lowerCamelCase=0.1,__lowerCamelCase=0.1,__lowerCamelCase=0.1,__lowerCamelCase=0.0,__lowerCamelCase=0.0,__lowerCamelCase=0.1,__lowerCamelCase=0.1,__lowerCamelCase=0.02,__lowerCamelCase=1E-5,__lowerCamelCase="group",__lowerCamelCase="gelu",__lowerCamelCase=(512, 512, 512, 512, 512, 512, 512),__lowerCamelCase=(5, 2, 2, 2, 2, 2, 2),__lowerCamelCase=(10, 3, 3, 3, 3, 2, 2),__lowerCamelCase=False,__lowerCamelCase=128,__lowerCamelCase=16,__lowerCamelCase=False,__lowerCamelCase=True,__lowerCamelCase=0.05,__lowerCamelCase=10,__lowerCamelCase=2,__lowerCamelCase=0.0,__lowerCamelCase=10,__lowerCamelCase=0,__lowerCamelCase=320,__lowerCamelCase=2,__lowerCamelCase=0.1,__lowerCamelCase=100,__lowerCamelCase=256,__lowerCamelCase=256,__lowerCamelCase=0.1,__lowerCamelCase="mean",__lowerCamelCase=False,__lowerCamelCase=False,__lowerCamelCase=256,__lowerCamelCase=80,__lowerCamelCase=0,__lowerCamelCase=1,__lowerCamelCase=2,__lowerCamelCase=0.5,**__lowerCamelCase,):
super().__init__(**__lowerCamelCase,pad_token_id=__lowerCamelCase,bos_token_id=__lowerCamelCase,eos_token_id=__lowerCamelCase )
A__ = hidden_size
A__ = feat_extract_norm
A__ = feat_extract_activation
A__ = list(__lowerCamelCase )
A__ = list(__lowerCamelCase )
A__ = list(__lowerCamelCase )
A__ = conv_bias
A__ = num_conv_pos_embeddings
A__ = num_conv_pos_embedding_groups
A__ = len(self.conv_dim )
A__ = num_hidden_layers
A__ = intermediate_size
A__ = hidden_act
A__ = num_attention_heads
A__ = hidden_dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = feat_proj_dropout
A__ = final_dropout
A__ = layerdrop
A__ = layer_norm_eps
A__ = initializer_range
A__ = num_ctc_classes
A__ = vocab_size
A__ = do_stable_layer_norm
A__ = use_weighted_layer_sum
A__ = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A__ = apply_spec_augment
A__ = mask_time_prob
A__ = mask_time_length
A__ = mask_time_min_masks
A__ = mask_feature_prob
A__ = mask_feature_length
A__ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
A__ = num_codevectors_per_group
A__ = num_codevector_groups
A__ = contrastive_logits_temperature
A__ = feat_quantizer_dropout
A__ = num_negatives
A__ = codevector_dim
A__ = proj_codevector_dim
A__ = diversity_loss_weight
# ctc loss
A__ = ctc_loss_reduction
A__ = ctc_zero_infinity
# pretraining loss
A__ = replace_prob
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 39 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
"""configuration_audio_spectrogram_transformer""": [
"""AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ASTConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
"""AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ASTForAudioClassification""",
"""ASTModel""",
"""ASTPreTrainedModel""",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
 | 81 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train/eval `DataLoader`s for GLUE MRPC using the "bert-base-cased" tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions, references=references, )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
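# `find_executable_batch_size` catches CUDA out-of-memory errors raised inside the
# decorated function and reruns it with a halved batch size until the loop fits in memory.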
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 64 | 0 |
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power (Project Euler 16)."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num
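# Worked example: solution(15) == 26, since 2**15 == 32768 and 3 + 2 + 7 + 6 + 8 == 26.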
if __name__ == "__main__":
    power = int(input('Enter the power of 2: ').strip())
    print('2 ^ ', power, ' = ', 2**power)
    result = solution(power)
    print('Sum of the digits is: ', result)
| 367 |
import math
def res(x: int, y: int) -> float:
    """Return log10(x**y), so two powers can be compared without computing them."""
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")
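# Worked example: res(2, 10) == 10 * math.log10(2) ≈ 3.0103, and 2**10 == 1024 indeed
# has floor(3.0103) + 1 == 4 digits.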
if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = 'Enter the base and the power separated by a comma: '
    x1, y1 = map(int, input(prompt).split(','))
    x2, y2 = map(int, input(prompt).split(','))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)
    # We check for the largest number
    if res1 > res2:
        print('Largest number is', x1, '^', y1)
    elif res2 > res1:
        print('Largest number is', x2, '^', y2)
    else:
        print('Both are equal')
| 328 | 0 |
from typing import Any


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f'Node({self.data})'


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Assign prev (the old tail) as the new head
        self.head = prev
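# Note: __len__ walks the entire list, so the bounds checks above make insert_nth and
# delete_nth O(n) even when inserting or deleting at the head.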
def test_singly_linked_list() -> None:
    """Exercise the basic LinkedList operations."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """Exercise LinkedList with heterogeneous data, including Node instances and None."""
    test_input = [
        -9,
        100,
        Node(77_345_112),
        'dlrow olleH',
        7,
        5_555,
        0,
        -192.55555,
        'Hello, world!',
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!'))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(f'Element at Position 1: {linked_list[1]}')
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f'length of linked_list is : {len(linked_list)}')
if __name__ == "__main__":
main()
| 131 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileNetV2ImageProcessor(BaseImageProcessor):
    # The dataset's renaming mangled the class name; the defaults and methods below match
    # the MobileNetV2 image processor layout in transformers, restored accordingly.
    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}')
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None) -> List[Any]:
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits')
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
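# Illustrative usage (BaseImageProcessor.__call__ dispatches to preprocess; names are
# assumptions for the sketch): processor = MobileNetV2ImageProcessor();
# batch = processor(images=pil_image, return_tensors="pt")  # batch["pixel_values"]: (1, 3, 224, 224)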
| 131 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250_004
RO_CODE = 250_020


@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    # Test method names below are descriptive choices; the dataset's renaming dropped the originals.
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_054)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_054)

    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ], )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."], )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowercase_ :Optional[int] = {"input_ids": [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        # (`lowercase_` above holds the expected encoding dict.)
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase_, model_name="facebook/mbart-large-50", revision="d3913889c59cd5c9e456b269c376325eabad57e2", )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBart50Tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO")
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250_001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250_004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250_020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250_038)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250_053, 250_001])
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt", )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt")
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR")

        self.assertEqual(
            nested_simplify(inputs), {
                # en_XX, A, test, EOS
                "input_ids": [[250_004, 62, 3_034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250_001,
            }, )
| 147 |
'''simple docstring'''
def gnome_sort(lst: list) -> list:
    """Sort `lst` in place using gnome sort and return it."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
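# Example: gnome_sort([34, 2, 10, -9]) returns [-9, 2, 10, 34].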
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(gnome_sort(unsorted))
| 147 | 1 |
def factorial(num: int) -> int:
    """Return num!."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split the digits of `number` and add them."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler 20)."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
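# Worked example: solution(10) == 27, since 10! == 3628800 and 3+6+2+8+8+0+0 == 27.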
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 325 |
"""simple docstring"""
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))])
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 40 | 0 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether `cp` is the codepoint of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False
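# e.g. _is_chinese_char(ord("中")) is True (U+4E2D falls in the first range), while
# _is_chinese_char(ord("a")) is False.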
def is_chinese(word: str):
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_length = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_length = min(end - start, max_word_length)
            for i in range(max_match_length, 1, -1):
                whole_word = ''.join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
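# Illustrative example: with chinese_word_set == {"中国"},
# add_sub_symbol(["中", "国", "人"], {"中国"}) returns ["中", "##国", "人"],
# marking "国" as the continuation of the whole word "中国".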
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=['cws']).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res['input_ids'])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    with open(args.file_name, 'r', encoding='utf-8') as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, 'w', encoding='utf-8') as f:
        data = [json.dumps(ref) + '\n' for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
    args = parser.parse_args()
main(args)
| 311 |
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in non-decreasing order (trial division)."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
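# Example: prime_factors(360) == [2, 2, 2, 3, 3, 5].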
if __name__ == "__main__":
import doctest
doctest.testmod()
| 311 | 1 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True, )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenLlamaModel,
            'text-classification': OpenLlamaForSequenceClassification,
            'text-generation': OpenLlamaForCausalLM,
            'zero-shot': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 82 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ('cls_token', 'vit.embeddings.cls_token'),
            ('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
            ('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
            ('pos_embed', 'vit.embeddings.position_embeddings'),
        ])

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ('norm.weight', 'layernorm.weight'),
                ('norm.bias', 'layernorm.bias'),
            ])
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit') else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ('norm.weight', 'vit.layernorm.weight'),
                ('norm.bias', 'vit.layernorm.bias'),
                ('head.weight', 'classifier.weight'),
                ('head.bias', 'classifier.bias'),
            ])

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """simple docstring"""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
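# Standalone illustration of the split above (hypothetical helper, not part of the script):
# timm stores attention as one fused (3 * hidden_size, hidden_size) projection, and the
# three consecutive row-blocks are query, key and value, in that order.
def split_qkv(in_proj_weight, hidden_size):
    query = in_proj_weight[:hidden_size, :]
    key = in_proj_weight[hidden_size : hidden_size * 2, :]
    value = in_proj_weight[-hidden_size:, :]
    return query, key, value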
def remove_classification_head_(state_dict):
    """simple docstring"""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """simple docstring"""
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
_a = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
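    # Example invocation (the script filename is illustrative; use the path this file lives at):
    #   python convert_dino_to_vit.py --model_name dino_vits8 --pytorch_dump_folder_path ./dino_vits8
    # Note that `parser.set_defaults(base_model=True)` above makes the base-model conversion the
    # default, so the classification head is dropped unless that default is changed.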
| 39 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
A_ = None
A_ = logging.get_logger(__name__)
A_ = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
A_ = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
A_ = {
'''google/rembert''': 2_56,
}
A_ = '''▁'''
class lowercase( __a ):
'''simple docstring'''
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = RemBertTokenizer
def __init__( self: Any, a_: Optional[int]=None, a_: Optional[Any]=None, a_: str=True, a_: Dict=True, a_: Any=False, a_: Optional[int]="[CLS]", a_: Union[str, Any]="[SEP]", a_: str="<unk>", a_: Optional[Any]="[SEP]", a_: Optional[Any]="<pad>", a_: Optional[int]="[CLS]", a_: Tuple="[MASK]", **a_: Optional[int], ):
'''simple docstring'''
_snake_case : str = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else mask_token
super().__init__(
a_, tokenizer_file=a_, do_lower_case=a_, remove_space=a_, keep_accents=a_, bos_token=a_, eos_token=a_, unk_token=a_, sep_token=a_, pad_token=a_, cls_token=a_, mask_token=a_, **a_, )
_snake_case : Dict = do_lower_case
_snake_case : Tuple = remove_space
_snake_case : Any = keep_accents
_snake_case : Union[str, Any] = vocab_file
_snake_case : Union[str, Any] = False if not self.vocab_file else True
def UpperCamelCase_ ( self: int, a_: List[int], a_: Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Optional[Any] = [self.sep_token_id]
_snake_case : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase_ ( self: str, a_: List[int], a_: Optional[List[int]] = None, a_: bool = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1]
return [1] + ([0] * len(a_ )) + [1]
def UpperCamelCase_ ( self: Tuple, a_: List[int], a_: Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Optional[int] = [self.sep_token_id]
_snake_case : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
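    # Worked example for the two mask/segment helpers above (tokens shown schematically):
    #   single sequence:  [CLS] A A [SEP]           -> token_type_ids [0, 0, 0, 0]
    #   sequence pair:    [CLS] A A [SEP] B [SEP]   -> token_type_ids [0, 0, 0, 0, 1, 1]
    # Everything up to and including the first [SEP] belongs to segment 0, the rest to segment 1.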
def UpperCamelCase_ ( self: int, a_: str, a_: Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(a_ ):
logger.error("""Vocabulary path ({}) should be a directory""".format(a_ ) )
return
_snake_case : Optional[int] = os.path.join(
a_, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ):
copyfile(self.vocab_file, a_ )
return (out_vocab_file,)
| 132 |
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
A_ = '''<<<<<<< This should probably be modified because it mentions: '''
A_ = '''=======
>>>>>>>
'''
A_ = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
A_ = [
# (pattern, replacement)
# Order is important here for some replacements
(r'''tfds\.core''', r'''datasets'''),
(r'''tf\.io\.gfile\.GFile''', r'''open'''),
    (r'''tf\.([\w\d]+)''', r'''datasets.Value('\1')'''),
    (r'''tfds\.features\.Text\(\)''', r'''datasets.Value('string')'''),
    (r'''tfds\.features\.Text\(''', r'''datasets.Value('string'),'''),
(r'''features\s*=\s*tfds.features.FeaturesDict\(''', r'''features=datasets.Features('''),
(r'''tfds\.features\.FeaturesDict\(''', r'''dict('''),
(r'''The TensorFlow Datasets Authors''', r'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(r'''tfds\.''', r'''datasets.'''),
(r'''dl_manager\.manual_dir''', r'''self.config.data_dir'''),
(r'''self\.builder_config''', r'''self.config'''),
]
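# Minimal illustration (not executed by the CLI) of how TO_CONVERT rewrites a single line:
#
#   >>> line = "features=tfds.features.FeaturesDict({'text': tfds.features.Text()})"
#   >>> for pattern, replacement in TO_CONVERT:
#   ...     line = re.sub(pattern, replacement, line)
#   >>> line
#   "features=datasets.Features({'text': datasets.Value('string')})"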
def UpperCAmelCase__ (snake_case__ : Namespace ):
"""simple docstring"""
return ConvertCommand(args.tfds_path , args.datasets_directory )
class lowercase( __a ):
'''simple docstring'''
@staticmethod
def UpperCamelCase_ ( a_: ArgumentParser ):
'''simple docstring'''
_snake_case : Tuple = parser.add_parser(
"""convert""", help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""", )
train_parser.add_argument(
"""--tfds_path""", type=a_, required=a_, help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""", )
train_parser.add_argument(
"""--datasets_directory""", type=a_, required=a_, help="""Path to the HuggingFace Datasets folder.""" )
train_parser.set_defaults(func=a_ )
def __init__( self: List[str], a_: str, a_: str, *a_: str ):
'''simple docstring'''
_snake_case : Optional[Any] = get_logger("""datasets-cli/converting""" )
_snake_case : Any = tfds_path
_snake_case : Optional[Any] = datasets_directory
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
_snake_case : int = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
_snake_case : Any = os.path.dirname(self._tfds_path )
else:
raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""" )
_snake_case : Union[str, Any] = os.path.abspath(self._datasets_directory )
self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
_snake_case : Tuple = []
_snake_case : Dict = []
_snake_case : Optional[Any] = {}
if os.path.isdir(self._tfds_path ):
_snake_case : List[str] = os.listdir(a_ )
else:
_snake_case : int = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f"Looking at file {f_name}" )
_snake_case : Dict = os.path.join(a_, a_ )
_snake_case : Union[str, Any] = os.path.join(a_, a_ )
if not os.path.isfile(a_ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("""Skipping file""" )
continue
with open(a_, encoding="""utf-8""" ) as f:
_snake_case : str = f.readlines()
_snake_case : List[str] = []
_snake_case : Any = False
_snake_case : Union[str, Any] = False
_snake_case : Optional[Any] = []
for line in lines:
_snake_case : Optional[int] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
_snake_case : Optional[Any] = """import datasets\n"""
elif "import tensorflow" in out_line:
# order is important here
_snake_case : Optional[int] = """"""
continue
elif "from absl import logging" in out_line:
_snake_case : int = """from datasets import logging\n"""
elif "getLogger" in out_line:
_snake_case : Any = out_line.replace("""getLogger""", """get_logger""" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
_snake_case : Union[str, Any] = True
                    _snake_case : Optional[Any] = list(filter(lambda e: e in out_line, TO_HIGHLIGHT ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(a_ ) + """\n""" )
out_lines.append(a_ )
out_lines.append(a_ )
continue
else:
for pattern, replacement in TO_CONVERT:
_snake_case : List[str] = re.sub(a_, a_, a_ )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
_snake_case : Dict = re.match(r"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""", a_ )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(""",""" ) )
_snake_case : Optional[Any] = """from . import """ + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"Error converting {out_line.strip()}" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
_snake_case : Tuple = True
out_lines.append(a_ )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
_snake_case : List[str] = f_name.replace(""".py""", """""" )
_snake_case : str = os.path.join(a_, a_ )
_snake_case : str = os.path.join(a_, a_ )
os.makedirs(a_, exist_ok=a_ )
self._logger.info(f"Adding directory {output_dir}" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(a_ )
if needs_manual_update:
with_manual_update.append(a_ )
with open(a_, """w""", encoding="""utf-8""" ) as f:
f.writelines(a_ )
self._logger.info(f"Converted in {output_file}" )
for utils_file in utils_files:
try:
_snake_case : Optional[int] = os.path.basename(a_ )
_snake_case : Optional[Any] = imports_to_builder_map[f_name.replace(""".py""", """""" )]
self._logger.info(f"Moving {dest_folder} to {utils_file}" )
shutil.copy(a_, a_ )
except KeyError:
self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually." )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
| 132 | 1 |
from ...configuration_utils import PretrainedConfig
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = "bert-generation"
def __init__( self , UpperCAmelCase=5_0358 , UpperCAmelCase=1024 , UpperCAmelCase=24 , UpperCAmelCase=16 , UpperCAmelCase=4096 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-12 , UpperCAmelCase=0 , UpperCAmelCase=2 , UpperCAmelCase=1 , UpperCAmelCase="absolute" , UpperCAmelCase=True , **UpperCAmelCase , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = position_embedding_type
_UpperCAmelCase = use_cache
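# Minimal usage sketch (hedged): this config is typically paired with
# BertGenerationEncoder / BertGenerationDecoder from transformers.
#
#   >>> from transformers import BertGenerationConfig, BertGenerationEncoder
#   >>> config = BertGenerationConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4, intermediate_size=512)
#   >>> model = BertGenerationEncoder(config)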
| 39 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , **SCREAMING_SNAKE_CASE_ , )-> Optional[int]:
'''simple docstring'''
super().__init__(features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = Sql(
cache_dir=SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ , sql=SCREAMING_SNAKE_CASE_ , con=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
def A__ ( self )-> Any:
'''simple docstring'''
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
self.builder.download_and_prepare(
download_config=SCREAMING_SNAKE_CASE_ , download_mode=SCREAMING_SNAKE_CASE_ , verification_mode=SCREAMING_SNAKE_CASE_ , base_path=SCREAMING_SNAKE_CASE_ , )
# Build dataset for splits
__UpperCamelCase = self.builder.as_dataset(
split='''train''' , verification_mode=SCREAMING_SNAKE_CASE_ , in_memory=self.keep_in_memory )
return dataset
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> List[str]:
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(F"num_proc {num_proc} must be an integer > 0." )
__UpperCamelCase = dataset
__UpperCamelCase = name
__UpperCamelCase = con
__UpperCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__UpperCamelCase = num_proc
__UpperCamelCase = to_sql_kwargs
def A__ ( self )-> int:
'''simple docstring'''
__UpperCamelCase = self.to_sql_kwargs.pop('''sql''' , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = self.to_sql_kwargs.pop('''con''' , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = self.to_sql_kwargs.pop('''index''' , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = self._write(index=SCREAMING_SNAKE_CASE_ , **self.to_sql_kwargs )
return written
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Dict:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = args
__UpperCamelCase = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs
__UpperCamelCase = query_table(
table=self.dataset.data , key=slice(SCREAMING_SNAKE_CASE_ , offset + self.batch_size ) , indices=self.dataset._indices , )
__UpperCamelCase = batch.to_pandas()
__UpperCamelCase = df.to_sql(self.name , self.con , index=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
return num_rows or len(SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> int:
'''simple docstring'''
__UpperCamelCase = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
__UpperCamelCase , __UpperCamelCase = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += num_rows
return written
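# Hedged usage sketch: in the public API these classes are reached through
# Dataset.from_sql / Dataset.to_sql (the SQLite URI below is illustrative):
#
#   >>> from datasets import Dataset
#   >>> ds = Dataset.from_dict({"text": ["a", "b"]})
#   >>> ds.to_sql("my_table", "sqlite:///my.db")              # delegates to SqlDatasetWriter
#   >>> ds2 = Dataset.from_sql("my_table", "sqlite:///my.db")  # delegates to SqlDatasetReader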
| 328 | 0 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    inputs_are_torch = not isinstance(v0, np.ndarray)
    if inputs_are_torch:
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are almost colinear: plain linear interpolation is numerically safer
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2
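# Quick sanity check for slerp (illustrative): the endpoints are recovered at t = 0 and t = 1.
#
#   >>> v0, v1 = np.array([1.0, 0.0]), np.array([0.0, 1.0])
#   >>> np.allclose(slerp(0.0, v0, v1), v0) and np.allclose(slerp(1.0, v0, v1), v1)
#   True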
def spherical_dist_loss(x, y):
    # 2 * arcsin(||x - y|| / 2) ** 2: squared great-circle distance between unit vectors
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : List[Any] , lowercase : AutoencoderKL , lowercase : CLIPTextModel , lowercase : CLIPModel , lowercase : CLIPTokenizer , lowercase : UNetaDConditionModel , lowercase : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , lowercase : CLIPFeatureExtractor , lowercase : Any=None , lowercase : List[str]=None , lowercase : List[str]=None , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=lowercase , text_encoder=lowercase , clip_model=lowercase , tokenizer=lowercase , unet=lowercase , scheduler=lowercase , feature_extractor=lowercase , coca_model=lowercase , coca_tokenizer=lowercase , coca_transform=lowercase , )
_snake_case = (
feature_extractor.size
if isinstance(feature_extractor.size , lowercase )
else feature_extractor.size['shortest_edge']
)
_snake_case = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , lowercase )
set_requires_grad(self.clip_model , lowercase )
def A ( self : Optional[int] , lowercase : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase )
def A ( self : Dict ):
'''simple docstring'''
self.enable_attention_slicing(lowercase )
def A ( self : int ):
'''simple docstring'''
set_requires_grad(self.vae , lowercase )
def A ( self : Union[str, Any] ):
'''simple docstring'''
set_requires_grad(self.vae , lowercase )
def A ( self : str ):
'''simple docstring'''
set_requires_grad(self.unet , lowercase )
def A ( self : Dict ):
'''simple docstring'''
set_requires_grad(self.unet , lowercase )
def A ( self : str , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Union[str, Any] ):
'''simple docstring'''
_snake_case = min(int(num_inference_steps * strength ) , lowercase )
_snake_case = max(num_inference_steps - init_timestep , 0 )
_snake_case = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def A ( self : Optional[Any] , lowercase : List[str] , lowercase : Optional[Any] , lowercase : str , lowercase : Union[str, Any] , lowercase : List[str] , lowercase : List[Any]=None ):
'''simple docstring'''
if not isinstance(lowercase , torch.Tensor ):
raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(lowercase )}''' )
_snake_case = image.to(device=lowercase , dtype=lowercase )
if isinstance(lowercase , lowercase ):
_snake_case = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowercase )
]
_snake_case = torch.cat(lowercase , dim=0 )
else:
_snake_case = self.vae.encode(lowercase ).latent_dist.sample(lowercase )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_snake_case = 0.18215 * init_latents
_snake_case = init_latents.repeat_interleave(lowercase , dim=0 )
_snake_case = randn_tensor(init_latents.shape , generator=lowercase , device=lowercase , dtype=lowercase )
# get latents
_snake_case = self.scheduler.add_noise(lowercase , lowercase , lowercase )
_snake_case = init_latents
return latents
def A ( self : int , lowercase : int ):
'''simple docstring'''
_snake_case = self.coca_transform(lowercase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_snake_case = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_snake_case = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def A ( self : List[Any] , lowercase : Dict , lowercase : Any ):
'''simple docstring'''
_snake_case = self.feature_extractor.preprocess(lowercase )
_snake_case = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
_snake_case = self.clip_model.get_image_features(lowercase )
_snake_case = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowercase )
_snake_case = image_embeddings_clip.repeat_interleave(lowercase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def A ( self : int , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : int , lowercase : Any , lowercase : Union[str, Any] , lowercase : Optional[int] , ):
'''simple docstring'''
_snake_case = latents.detach().requires_grad_()
_snake_case = self.scheduler.scale_model_input(lowercase , lowercase )
# predict the noise residual
_snake_case = self.unet(lowercase , lowercase , encoder_hidden_states=lowercase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_snake_case = self.scheduler.alphas_cumprod[timestep]
_snake_case = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_snake_case = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_snake_case = torch.sqrt(lowercase )
_snake_case = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , lowercase ):
_snake_case = self.scheduler.sigmas[index]
_snake_case = latents - sigma * noise_pred
else:
raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_snake_case = 1 / 0.18215 * sample
_snake_case = self.vae.decode(lowercase ).sample
_snake_case = (image / 2 + 0.5).clamp(0 , 1 )
_snake_case = transforms.Resize(self.feature_extractor_size )(lowercase )
_snake_case = self.normalize(lowercase ).to(latents.dtype )
_snake_case = self.clip_model.get_image_features(lowercase )
_snake_case = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowercase )
_snake_case = spherical_dist_loss(lowercase , lowercase ).mean() * clip_guidance_scale
_snake_case = -torch.autograd.grad(lowercase , lowercase )[0]
if isinstance(self.scheduler , lowercase ):
_snake_case = latents.detach() + grads * (sigma**2)
_snake_case = noise_pred_original
else:
_snake_case = noise_pred_original - torch.sqrt(lowercase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : int , lowercase : Union[torch.FloatTensor, PIL.Image.Image] , lowercase : Union[torch.FloatTensor, PIL.Image.Image] , lowercase : Optional[str] = None , lowercase : Optional[str] = None , lowercase : Optional[int] = 512 , lowercase : Optional[int] = 512 , lowercase : float = 0.6 , lowercase : Optional[int] = 50 , lowercase : Optional[float] = 7.5 , lowercase : Optional[int] = 1 , lowercase : float = 0.0 , lowercase : Optional[float] = 100 , lowercase : Optional[torch.Generator] = None , lowercase : Optional[str] = "pil" , lowercase : bool = True , lowercase : float = 0.8 , lowercase : float = 0.1 , lowercase : float = 0.1 , ):
'''simple docstring'''
if isinstance(lowercase , lowercase ) and len(lowercase ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(lowercase )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(lowercase , torch.Generator ) and batch_size > 1:
_snake_case = [generator] + [None] * (batch_size - 1)
_snake_case = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
_snake_case = [x[0] for x in coca_is_none if x[1]]
_snake_case = ', '.join(lowercase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(lowercase ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
f'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_snake_case = self.get_image_description(lowercase )
if style_prompt is None:
if len(lowercase ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_snake_case = self.get_image_description(lowercase )
# get prompt text embeddings for content and style
_snake_case = self.tokenizer(
lowercase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=lowercase , return_tensors='pt' , )
_snake_case = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_snake_case = self.tokenizer(
lowercase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=lowercase , return_tensors='pt' , )
_snake_case = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_snake_case = slerp(lowercase , lowercase , lowercase )
# duplicate text embeddings for each generation per prompt
_snake_case = text_embeddings.repeat_interleave(lowercase , dim=0 )
# set timesteps
_snake_case = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_snake_case = {}
if accepts_offset:
_snake_case = 1
self.scheduler.set_timesteps(lowercase , **lowercase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_snake_case , _snake_case = self.get_timesteps(lowercase , lowercase , self.device )
_snake_case = timesteps[:1].repeat(lowercase )
# Preprocess image
_snake_case = preprocess(lowercase , lowercase , lowercase )
_snake_case = self.prepare_latents(
lowercase , lowercase , lowercase , text_embeddings.dtype , self.device , lowercase )
_snake_case = preprocess(lowercase , lowercase , lowercase )
_snake_case = self.prepare_latents(
lowercase , lowercase , lowercase , text_embeddings.dtype , self.device , lowercase )
_snake_case = slerp(lowercase , lowercase , lowercase )
if clip_guidance_scale > 0:
_snake_case = self.get_clip_image_embeddings(lowercase , lowercase )
_snake_case = self.get_clip_image_embeddings(lowercase , lowercase )
_snake_case = slerp(
lowercase , lowercase , lowercase )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_snake_case = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_snake_case = content_text_input.input_ids.shape[-1]
_snake_case = self.tokenizer([''] , padding='max_length' , max_length=lowercase , return_tensors='pt' )
_snake_case = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_snake_case = uncond_embeddings.repeat_interleave(lowercase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_snake_case = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_snake_case = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_snake_case = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_snake_case = torch.randn(lowercase , generator=lowercase , device='cpu' , dtype=lowercase ).to(
self.device )
else:
_snake_case = torch.randn(lowercase , generator=lowercase , device=self.device , dtype=lowercase )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
_snake_case = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_snake_case = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_snake_case = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_snake_case = {}
if accepts_eta:
_snake_case = eta
# check if the scheduler accepts generator
_snake_case = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_snake_case = generator
with self.progress_bar(total=lowercase ):
for i, t in enumerate(lowercase ):
# expand the latents if we are doing classifier free guidance
_snake_case = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_snake_case = self.scheduler.scale_model_input(lowercase , lowercase )
# predict the noise residual
_snake_case = self.unet(lowercase , lowercase , encoder_hidden_states=lowercase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_snake_case , _snake_case = noise_pred.chunk(2 )
_snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_snake_case = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_snake_case , _snake_case = self.cond_fn(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , )
# compute the previous noisy sample x_t -> x_t-1
_snake_case = self.scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_snake_case = 1 / 0.18215 * latents
_snake_case = self.vae.decode(lowercase ).sample
_snake_case = (image / 2 + 0.5).clamp(0 , 1 )
_snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_snake_case = self.numpy_to_pil(lowercase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=lowercase , nsfw_content_detected=lowercase ) | 361 |
def solution(limit: int = 50_000_000) -> int:
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        # sieve of Eratosthenes: strike out multiples of p, stepping by p
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))
    # NOTE: the early breaks below rely on CPython iterating small-int sets in ascending order
    for prime_1 in primes:
        square = prime_1 * prime_1
        for prime_2 in primes:
            cube = prime_2 * prime_2 * prime_2
            if square + cube >= limit - 16:
                break
            for prime_3 in primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)
    return len(ret)
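# Sanity check from the Project Euler 87 statement: below fifty exactly four numbers are
# expressible as a prime square plus a prime cube plus a prime fourth power (28, 33, 47, 49).
#
#   >>> solution(50)
#   4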
if __name__ == "__main__":
print(F'{solution() = }') | 130 | 0 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _a ( unittest.TestCase ):
@property
def __snake_case (self ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase_: Dict = UNetaDModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("""DownBlock2D""", """AttnDownBlock2D"""), up_block_types=("""AttnUpBlock2D""", """UpBlock2D"""), )
return model
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_: List[Any] = self.dummy_uncond_unet
UpperCAmelCase_: Union[str, Any] = KarrasVeScheduler()
UpperCAmelCase_: Any = KarrasVePipeline(unet=SCREAMING_SNAKE_CASE_, scheduler=SCREAMING_SNAKE_CASE_ )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[Any] = torch.manual_seed(0 )
UpperCAmelCase_: List[str] = pipe(num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type="""numpy""" ).images
UpperCAmelCase_: Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase_: Optional[Any] = pipe(num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type="""numpy""", return_dict=SCREAMING_SNAKE_CASE_ )[0]
UpperCAmelCase_: List[str] = image[0, -3:, -3:, -1]
UpperCAmelCase_: Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_: Any = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class _a ( unittest.TestCase ):
def __snake_case (self ) -> Any:
UpperCAmelCase_: Any = """google/ncsnpp-celebahq-256"""
UpperCAmelCase_: Dict = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Union[str, Any] = KarrasVeScheduler()
UpperCAmelCase_: int = KarrasVePipeline(unet=SCREAMING_SNAKE_CASE_, scheduler=SCREAMING_SNAKE_CASE_ )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Dict = torch.manual_seed(0 )
UpperCAmelCase_: Tuple = pipe(num_inference_steps=20, generator=SCREAMING_SNAKE_CASE_, output_type="""numpy""" ).images
UpperCAmelCase_: List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase_: Dict = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 147 |
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Circular FIFO queue backed by a fixed ring of doubly linked nodes."""
    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)
    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # close the ring
        previous_node.next = self.front
        self.front.prev = previous_node
    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )
    def first(self) -> Any | None:
        # peek at the front element without removing it
        self.check_can_perform_operation()
        return self.front.data if self.front else None
    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data
    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data
    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")
    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")
class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
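# Minimal usage sketch (illustrative):
#
#   >>> q = CircularQueueLinkedList(3)
#   >>> q.enqueue("a"); q.enqueue("b")
#   >>> q.dequeue()
#   'a'
#   >>> q.dequeue()
#   'b'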
| 147 | 1 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    """Decorator that makes a function return its wall-clock duration in seconds."""
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta
    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples(features, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = "The small grey turtle was surprisingly fast when challenged."
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)
        dummy_data.append((i, example))
    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
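# Hedged usage sketch for the helpers above (the feature spec is illustrative):
#
#   >>> import datasets
#   >>> features = datasets.Features(
#   ...     {"text": datasets.Value("string"), "scores": datasets.Sequence(datasets.Value("float32"))}
#   ... )
#   >>> ds = generate_example_dataset("/tmp/bench.arrow", features, num_examples=10, seq_shapes={"scores": (16,)})
#   >>> len(ds)
#   10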
| 351 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
__A = logging.get_logger(__name__)
__A = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
__A = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85,
7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77,
13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11,
46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86,
1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91,
1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09,
3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61
]
__A = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73,
8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27,
32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47,
72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93,
1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75,
2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65,
4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62
]
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''whisper'''
snake_case_ = ['''past_key_values''']
snake_case_ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , lowerCamelCase__=51_865 , lowerCamelCase__=80 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=1_536 , lowerCamelCase__=1_536 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=50_257 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="gelu" , lowerCamelCase__=256 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.02 , lowerCamelCase__=False , lowerCamelCase__=1_500 , lowerCamelCase__=448 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=None , lowerCamelCase__=[220, 50_256] , lowerCamelCase__=False , lowerCamelCase__=256 , lowerCamelCase__=False , lowerCamelCase__=0.05 , lowerCamelCase__=10 , lowerCamelCase__=2 , lowerCamelCase__=0.0 , lowerCamelCase__=10 , lowerCamelCase__=0 , lowerCamelCase__=7 , **lowerCamelCase__ , ) -> str:
'''simple docstring'''
__lowerCamelCase = vocab_size
__lowerCamelCase = num_mel_bins
__lowerCamelCase = d_model
__lowerCamelCase = encoder_layers
__lowerCamelCase = encoder_attention_heads
__lowerCamelCase = decoder_layers
__lowerCamelCase = decoder_attention_heads
__lowerCamelCase = decoder_ffn_dim
__lowerCamelCase = encoder_ffn_dim
__lowerCamelCase = dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = activation_function
__lowerCamelCase = init_std
__lowerCamelCase = encoder_layerdrop
__lowerCamelCase = decoder_layerdrop
__lowerCamelCase = use_cache
__lowerCamelCase = encoder_layers
__lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCamelCase = max_source_positions
__lowerCamelCase = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
__lowerCamelCase = classifier_proj_size
__lowerCamelCase = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCamelCase = apply_spec_augment
__lowerCamelCase = mask_time_prob
__lowerCamelCase = mask_time_length
__lowerCamelCase = mask_time_min_masks
__lowerCamelCase = mask_feature_prob
__lowerCamelCase = mask_feature_length
__lowerCamelCase = mask_feature_min_masks
__lowerCamelCase = median_filter_width
super().__init__(
pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , is_encoder_decoder=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , suppress_tokens=lowerCamelCase__ , begin_suppress_tokens=lowerCamelCase__ , **lowerCamelCase__ , )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
@property
def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
__lowerCamelCase = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
] )
if self.use_past:
__lowerCamelCase = {0: 'batch'}
else:
__lowerCamelCase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase__ , direction='inputs' )
return common_inputs
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = -1 , lowerCamelCase__ = -1 , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = 22_050 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 220 , ) -> Mapping[str, Any]:
'''simple docstring'''
__lowerCamelCase = OrderedDict()
__lowerCamelCase = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=lowerCamelCase__ , framework=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , time_duration=lowerCamelCase__ , frequency=lowerCamelCase__ , )
__lowerCamelCase = encoder_inputs['input_features'].shape[2]
__lowerCamelCase = encoder_sequence_length // 2 if self.use_past else seq_length
__lowerCamelCase = super().generate_dummy_inputs(
preprocessor.tokenizer , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = encoder_inputs.pop('input_features' )
__lowerCamelCase = decoder_inputs.pop('decoder_input_ids' )
if "past_key_values" in decoder_inputs:
__lowerCamelCase = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
def lowercase_ ( self ) -> float:
'''simple docstring'''
return 1e-3
| 348 | 0 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    '''simple docstring'''
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False
def is_chinese(word):
    '''simple docstring'''
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    '''simple docstring'''
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    '''simple docstring'''
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    # longest match wins: tag every character after the first as a "##" continuation
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
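# Worked example (hypothetical tokens): with chinese_word_set = {"中国"},
#   add_sub_symbol(["中", "国", "人"], {"中国"}) returns ["中", "##国", "人"]
# Only continuation characters of a matched whole word get the "##" prefix; prepare_ref
# below uses exactly these prefixes to recover whole-word boundaries for masking.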
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    '''simple docstring'''
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    '''simple docstring'''
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
a : List[Any] = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
a : Tuple = parser.parse_args()
main(args)
| 311 |
'''simple docstring'''
def twos_complement(number: int) -> str:
    '''simple docstring'''
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 311 | 1 |
'''simple docstring'''
def solution(limit: int = 1_000_000) -> int:
    # sieve of Eratosthenes over the odd numbers below `limit`
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    # Euler's product formula: phi(n) = n * prod_{p | n} (1 - 1 / p)
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
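# Sanity check (Project Euler 72 statement): for d <= 8 there are 21 reduced proper
# fractions, i.e. sum(phi(n) for n in 2..8) = 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21.
#
#   >>> solution(8)
#   21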
if __name__ == "__main__":
print(f'''{solution() = }''')
| 356 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCamelCase_ = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption ( parser ):
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(parser )
def pytest_terminal_summary ( terminalreporter ):
from diffusers.utils.testing_utils import pytest_terminal_summary_main
make_reports = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(terminalreporter , id=make_reports )
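# Usage note (assuming the shared diffusers test helpers): invoking pytest with
# `--make-reports=<run_id>` makes pytest_terminal_summary_main write detailed
# per-run report files in addition to the normal terminal summary.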
| 111 | 0 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __a (ProcessorMixin):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[int] = ["""image_processor""", """tokenizer"""]
_SCREAMING_SNAKE_CASE :Dict = """AutoImageProcessor"""
_SCREAMING_SNAKE_CASE :int = """AutoTokenizer"""
def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
"""simple docstring"""
feature_extractor = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , FutureWarning , )
feature_extractor = kwargs.pop("""feature_extractor""" )
image_processor = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(image_processor , tokenizer )
self.current_processor = self.image_processor
self._in_target_context_manager = False
def __call__( self , *args , **kwargs ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*args , **kwargs )
images = kwargs.pop("""images""" , None )
text = kwargs.pop("""text""" , None )
if len(args ) > 0:
images = args[0]
args = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
inputs = self.image_processor(images , *args , **kwargs )
if text is not None:
encodings = self.tokenizer(text , **kwargs )
if text is None:
return inputs
elif images is None:
return encodings
else:
inputs["""labels"""] = encodings["""input_ids"""]
return inputs
def batch_decode( self , *args , **kwargs ):
"""simple docstring"""
return self.tokenizer.batch_decode(*args , **kwargs )
def decode( self , *args , **kwargs ):
"""simple docstring"""
return self.tokenizer.decode(*args , **kwargs )
@contextmanager
def as_target_processor( self ):
"""simple docstring"""
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
self._in_target_context_manager = True
self.current_processor = self.tokenizer
yield
self.current_processor = self.image_processor
self._in_target_context_manager = False
def tokenajson( self , tokens , is_inner_value=False , added_vocab=None ):
"""simple docstring"""
if added_vocab is None:
added_vocab = self.tokenizer.get_added_vocab()
output = {}
while tokens:
start_token = re.search(r"""<s_(.*?)>""" , tokens , re.IGNORECASE )
if start_token is None:
break
key = start_token.group(1 )
end_token = re.search(rf'''</s_{key}>''' , tokens , re.IGNORECASE )
start_token = start_token.group()
if end_token is None:
tokens = tokens.replace(start_token , """""" )
else:
end_token = end_token.group()
start_token_escaped = re.escape(start_token )
end_token_escaped = re.escape(end_token )
content = re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''' , tokens , re.IGNORECASE )
if content is not None:
content = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content:  # non-leaf node
value = self.tokenajson(content , is_inner_value=True , added_vocab=added_vocab )
if value:
if len(value ) == 1:
value = value[0]
output[key] = value
else:  # leaf nodes
output[key] = []
for leaf in content.split(r"""<sep/>""" ):
leaf = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
leaf = leaf[1:-2]  # for categorical special tokens
output[key].append(leaf )
if len(output[key] ) == 1:
output[key] = output[key][0]
tokens = tokens[tokens.find(end_token ) + len(end_token ) :].strip()
if tokens[:6] == r"<sep/>":  # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=True , added_vocab=added_vocab )
if len(output ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
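# Illustration of the tag-to-JSON parsing above (hypothetical token string):
#   processor.tokenajson("<s_menu><s_name>latte</s_name><s_price>4</s_price></s_menu>")
# returns {"menu": {"name": "latte", "price": "4"}}; "<sep/>" between leaf values
# turns them into lists instead.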
@property
def feature_extractor_class( self ):
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
return self.image_processor_class
@property
def feature_extractor( self ):
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , FutureWarning , )
return self.image_processor
| 132 |
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance ( emb_a , emb_b , eps=1E-12 ):
norm_emb_a = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(emb_a , axis=1 ) , a_min=eps ) ).T
norm_emb_b = jnp.divide(emb_b.T , jnp.clip(jnp.linalg.norm(emb_b , axis=1 ) , a_min=eps ) ).T
return jnp.matmul(norm_emb_a , norm_emb_b.T )
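# With rows normalized to unit length, each entry of the returned matrix is a cosine
# similarity in [-1, 1] (an embedding compared with itself gives 1.0); the checker
# below thresholds these similarities against per-concept weights.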
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
'''simple docstring'''
config: CLIPConfig
dtype: jnp.dtype = jnp.float32
def setup( self ):
"""simple docstring"""
self.vision_model = FlaxCLIPVisionModule(self.config.vision_config )
self.visual_projection = nn.Dense(self.config.projection_dim , use_bias=False , dtype=self.dtype )
self.concept_embeds = self.param("""concept_embeds""" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
self.special_care_embeds = self.param(
"""special_care_embeds""" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
self.concept_embeds_weights = self.param("""concept_embeds_weights""" , jax.nn.initializers.ones , (17,) )
self.special_care_embeds_weights = self.param("""special_care_embeds_weights""" , jax.nn.initializers.ones , (3,) )
def __call__( self , clip_input ):
"""simple docstring"""
pooled_output = self.vision_model(clip_input )[1]
image_embeds = self.visual_projection(pooled_output )
special_cos_dist = jax_cosine_distance(image_embeds , self.special_care_embeds )
cos_dist = jax_cosine_distance(image_embeds , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
adjustment = 0.0
special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
special_scores = jnp.round(special_scores , 3 )
is_special_care = jnp.any(special_scores > 0 , axis=1 , keepdims=True )
# Use a lower threshold if an image has any special care concept
special_adjustment = is_special_care * 0.01
concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
concept_scores = jnp.round(concept_scores , 3 )
has_nsfw_concepts = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class __a (FlaxPreTrainedModel):
'''simple docstring'''
config_class = CLIPConfig
main_input_name = """clip_input"""
module_class = FlaxStableDiffusionSafetyCheckerModule
def __init__( self , config , input_shape = None , seed = 0 , dtype = jnp.float32 , _do_init = True , **kwargs , ):
"""simple docstring"""
if input_shape is None:
input_shape = (1, 224, 224, 3)
module = self.module_class(config=config , dtype=dtype , **kwargs )
super().__init__(config , module , input_shape=input_shape , seed=seed , dtype=dtype , _do_init=_do_init )
def init_weights( self , rng , input_shape , params = None ) -> FrozenDict:
"""simple docstring"""
clip_input = jax.random.normal(rng , input_shape )
params_rng , dropout_rng = jax.random.split(rng )
rngs = {"""params""": params_rng, """dropout""": dropout_rng}
random_params = self.module.init(rngs , clip_input )["""params"""]
return random_params
def __call__( self , clip_input , params = None , ):
"""simple docstring"""
clip_input = jnp.transpose(clip_input , (0, 2, 3, 1) )
return self.module.apply(
{"""params""": params or self.params} , jnp.array(clip_input , dtype=jnp.float32 ) , rngs={} , )
| 132 | 1 |
from __future__ import annotations
import numpy as np
def relu ( vector ):
return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 362 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class VisualQuestionAnsweringPipeline( Pipeline ):
'''simple docstring'''
def __init__( self , *lowercase_ , **lowercase_ ) -> Any:
'''simple docstring'''
super().__init__(*lowercase_ , **lowercase_ )
self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING )
def _sanitize_parameters( self , top_k=None , padding=None , truncation=None , **kwargs ):
'''simple docstring'''
preprocess_params , postprocess_params = {}, {}
if padding is not None:
preprocess_params['padding'] = padding
if truncation is not None:
preprocess_params['truncation'] = truncation
if top_k is not None:
postprocess_params['top_k'] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , image , question = None , **kwargs ):
'''simple docstring'''
if isinstance(image , (Image.Image, str) ) and isinstance(question , str ):
inputs = {'image': image, 'question': question}
else:
inputs = image
results = super().__call__(inputs , **kwargs )
return results
def preprocess( self , inputs , padding=False , truncation=False ):
'''simple docstring'''
image = load_image(inputs['image'] )
model_inputs = self.tokenizer(
inputs['question'] , return_tensors=self.framework , padding=padding , truncation=truncation )
image_features = self.image_processor(images=image , return_tensors=self.framework )
model_inputs.update(image_features )
return model_inputs
def _forward( self , model_inputs ):
'''simple docstring'''
model_outputs = self.model(**model_inputs )
return model_outputs
def postprocess( self , model_outputs , top_k=5 ):
'''simple docstring'''
if top_k > self.model.config.num_labels:
top_k = self.model.config.num_labels
if self.framework == "pt":
probs = model_outputs.logits.sigmoid()[0]
scores , ids = probs.topk(top_k )
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
scores = scores.tolist()
ids = ids.tolist()
return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 14 | 0 |
import os
def solution ( ):
'''simple docstring'''
script_dir = os.path.dirname(os.path.realpath(__file__ ) )
triangle_path = os.path.join(script_dir , """triangle.txt""" )
with open(triangle_path ) as f:
triangle = f.readlines()
a = []
for line in triangle:
numbers_from_line = []
for number in line.strip().split(""" """ ):
numbers_from_line.append(int(number ) )
a.append(numbers_from_line )
# bottom-up dynamic programming: each cell accumulates the best path sum reaching it
for i in range(1 , len(a ) ):
for j in range(len(a[i] ) ):
number_one = a[i - 1][j] if j != len(a[i - 1] ) else 0
number_two = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(number_one , number_two )
return max(a[-1] )
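# Worked example of the accumulation above: for the triangle
# [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]] the rows accumulate to
# [3], [10, 7], [12, 14, 13], [20, 19, 23, 16], so max(a[-1]) = 23 (path 3+7+4+9).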
if __name__ == "__main__":
print(solution())
| 252 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
model_classes = {
'''b0''': efficientnet.EfficientNetB0,
'''b1''': efficientnet.EfficientNetB1,
'''b2''': efficientnet.EfficientNetB2,
'''b3''': efficientnet.EfficientNetB3,
'''b4''': efficientnet.EfficientNetB4,
'''b5''': efficientnet.EfficientNetB5,
'''b6''': efficientnet.EfficientNetB6,
'''b7''': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1_2_8_0,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 2_2_4,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_2_8_0,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 2_4_0,
'''dropout_rate''': 0.2,
'''dw_padding''': [1_6],
},
'''b2''': {
'''hidden_dim''': 1_4_0_8,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 2_6_0,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 1_6],
},
'''b3''': {
'''hidden_dim''': 1_5_3_6,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 3_0_0,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 1_8],
},
'''b4''': {
'''hidden_dim''': 1_7_9_2,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 3_8_0,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_0_4_8,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 4_5_6,
'''dropout_rate''': 0.4,
'''dw_padding''': [1_3, 2_7],
},
'''b6''': {
'''hidden_dim''': 2_3_0_4,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 5_2_8,
'''dropout_rate''': 0.5,
'''dw_padding''': [3_1],
},
'''b7''': {
'''hidden_dim''': 2_5_6_0,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 6_0_0,
'''dropout_rate''': 0.5,
'''dw_padding''': [1_8],
},
}
def get_efficientnet_config ( model_name ):
"""simple docstring"""
config = EfficientNetConfig()
config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
config.image_size = CONFIG_MAP[model_name]["image_size"]
config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]
repo_id = "huggingface/label-files"
filename = "imagenet-1k-id2label.json"
config.num_labels = 1_000
id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
id2label = {int(k ): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
return config
def prepare_img ( ):
"""simple docstring"""
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
im = Image.open(requests.get(url , stream=True ).raw )
return im
def convert_image_processor ( model_name ):
"""simple docstring"""
size = CONFIG_MAP[model_name]["image_size"]
preprocessor = EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=False , )
return preprocessor
def rename_keys ( original_param_names ):
"""simple docstring"""
block_names = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
block_names = sorted(set(block_names ) )
num_blocks = len(block_names )
block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}
rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
hf_b = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
key_mapping = {}
for item in rename_keys:
if item[0] in original_param_names:
key_mapping[item[0]] = "efficientnet." + item[1]
key_mapping["predictions/kernel:0"] = "classifier.weight"
key_mapping["predictions/bias:0"] = "classifier.bias"
return key_mapping
def replace_params ( hf_params , tf_params , key_mapping ):
"""simple docstring"""
for key, value in tf_params.items():
if "normalization" in key:
continue
hf_key = key_mapping[key]
if "_conv" in key and "kernel" in key:
new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
new_hf_value = torch.from_numpy(np.transpose(value ) )
else:
new_hf_value = torch.from_numpy(value )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(new_hf_value )
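# Layout note: TF stores conv kernels as HWIO (height, width, in, out) while PyTorch
# expects OIHW, hence permute(3, 2, 0, 1); TF depthwise kernels are
# (H, W, in, multiplier), and permute(2, 3, 0, 1) reorders them to the
# (in, multiplier, H, W) layout used by PyTorch's grouped convolutions.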
@torch.no_grad()
def convert_efficientnet_checkpoint ( model_name , pytorch_dump_folder_path , save_model , push_to_hub ):
"""simple docstring"""
original_model = model_classes[model_name](
include_top=True , weights="imagenet" , input_tensor=None , input_shape=None , pooling=None , classes=1_000 , classifier_activation="softmax" , )
tf_params = original_model.trainable_variables
tf_non_train_params = original_model.non_trainable_variables
tf_params = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
tf_params[param.name] = param.numpy()
tf_param_names = list(tf_params.keys() )
# Load HuggingFace model
config = get_efficientnet_config(model_name )
hf_model = EfficientNetForImageClassification(config ).eval()
hf_params = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
key_mapping = rename_keys(tf_param_names )
replace_params(hf_params , tf_params , key_mapping )
# Initialize preprocessor and preprocess input image
preprocessor = convert_image_processor(model_name )
inputs = preprocessor(images=prepare_img() , return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
outputs = hf_model(**inputs )
hf_logits = outputs.logits.detach().numpy()
# Original model inference
original_model.trainable = False
image_size = CONFIG_MAP[model_name]["image_size"]
img = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
x = image.img_to_array(img )
x = np.expand_dims(x , axis=0 )
original_logits = original_model.predict(x )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(original_logits , hf_logits , atol=1e-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(pytorch_dump_folder_path ):
os.mkdir(pytorch_dump_folder_path )
# Save converted model and image processor
hf_model.save_pretrained(pytorch_dump_folder_path )
preprocessor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
model_name = F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(model_name )
hf_model.push_to_hub(model_name )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
lowerCAmelCase__ = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
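# Example invocation (script and folder names illustrative):
#   python convert_efficientnet_to_pytorch.py --model_name b0 --pytorch_dump_folder_path hf_model --save_model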
| 130 | 0 |
import csv
import tweepy
# Twitter API credentials
consumer_key = ''''''
consumer_secret = ''''''
access_key = ''''''
access_secret = ''''''
def get_all_tweets ( screen_name ) -> None:
"""simple docstring"""
auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
auth.set_access_token(access_key , access_secret )
api = tweepy.API(auth )
# initialize a list to hold all the tweepy Tweets
alltweets = []
# make initial request for most recent tweets (200 is the maximum allowed count)
new_tweets = api.user_timeline(screen_name=screen_name , count=200 )
# save most recent tweets
alltweets.extend(new_tweets )
# save the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(new_tweets ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
new_tweets = api.user_timeline(
screen_name=screen_name , count=200 , max_id=oldest )
# save most recent tweets
alltweets.extend(new_tweets )
# update the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
print(f'''...{len(alltweets )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' , '''w''' ) as f:
writer = csv.writer(f )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(outtweets )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
| 279 |
def lowerCamelCase_ ( number ) -> str:
"""simple docstring"""
if number > 0:
raise ValueError('''input must be a negative integer''' )
binary_number_length = len(bin(number )[3:] )
twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
twos_complement_number = (
(
'''1'''
+ '''0''' * (binary_number_length - len(twos_complement_number ))
+ twos_complement_number
)
if number < 0
else '''0'''
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 279 | 1 |
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
def test_find_backend( self ):
simple_backend = find_backend(''' if not is_torch_available():''' )
self.assertEqual(simple_backend , '''torch''' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
double_backend = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
self.assertEqual(double_backend , '''torch_and_transformers''' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
triple_backend = find_backend(
''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
self.assertEqual(triple_backend , '''torch_and_transformers_and_onnx''' )
def test_read_init( self ):
objects = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , objects )
self.assertIn('''torch_and_transformers''' , objects )
self.assertIn('''flax_and_transformers''' , objects )
self.assertIn('''torch_and_transformers_and_onnx''' , objects )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''UNet2DModel''' , objects['''torch'''] )
self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )
def test_create_dummy_object( self ):
dummy_constant = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(dummy_constant , '''\nCONSTANT = None\n''' )
dummy_function = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
dummy_function , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
expected_dummy_class = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
dummy_class = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(dummy_class , expected_dummy_class )
def test_create_dummy_files( self ):
expected_dummy_pytorch_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
dummy_files = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , expected_dummy_pytorch_file )
| 23 | from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings ( ModelMixin , ConfigMixin ):
@register_to_config
def __init__( self , learnable , hidden_size = None , length = None ):
'''simple docstring'''
super().__init__()
self.learnable = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
embeddings = torch.zeros(length , hidden_size )
else:
embeddings = None
self.embeddings = torch.nn.Parameter(embeddings )
class __snake_case ( DiffusionPipeline ):
vqvae : VQModel
text_encoder : CLIPTextModel
tokenizer : CLIPTokenizer
transformer : Transformer2DModel
learned_classifier_free_sampling_embeddings : LearnedClassifierFreeSamplingEmbeddings
scheduler : VQDiffusionScheduler
def __init__( self , vqvae , text_encoder , tokenizer , transformer , scheduler , learned_classifier_free_sampling_embeddings , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vqvae=vqvae , transformer=transformer , text_encoder=text_encoder , tokenizer=tokenizer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
def _encode_prompt( self , prompt , num_images_per_prompt , do_classifier_free_guidance ):
'''simple docstring'''
batch_size = len(prompt ) if isinstance(prompt , list ) else 1
# get prompt text embeddings
text_inputs = self.tokenizer(
prompt , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
text_input_ids = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
prompt_embeds = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=True )
# duplicate text embeddings for each generation per prompt
prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0 ).repeat(batch_size , 1 , 1 )
else:
uncond_tokens = [''''''] * batch_size
max_length = text_input_ids.shape[-1]
uncond_input = self.tokenizer(
uncond_tokens , padding='''max_length''' , max_length=max_length , truncation=True , return_tensors='''pt''' , )
negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=True )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.repeat(1 , num_images_per_prompt , 1 )
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt , seq_len , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , prompt , num_inference_steps = 100 , guidance_scale = 5.0 , truncation_rate = 1.0 , num_images_per_prompt = 1 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
if isinstance(prompt , str ):
batch_size = 1
elif isinstance(prompt , list ):
batch_size = len(prompt )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(prompt )}''' )
batch_size = batch_size * num_images_per_prompt
do_classifier_free_guidance = guidance_scale > 1.0
prompt_embeds = self._encode_prompt(prompt , num_images_per_prompt , do_classifier_free_guidance )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(callback_steps )}.''' )
# get the initial completely masked latents unless the user supplied it
latents_shape = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
mask_class = self.transformer.num_vector_embeds - 1
latents = torch.full(latents_shape , mask_class ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
'''Unexpected latents value(s). All latents must be valid embedding indices i.e. in the range 0,'''
f''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
latents = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(num_inference_steps , device=self.device )
timesteps_tensor = self.scheduler.timesteps.to(self.device )
sample = latents
for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
# expand the sample if we are doing classifier free guidance
latent_model_input = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
model_output = self.transformer(latent_model_input , encoder_hidden_states=prompt_embeds , timestep=t ).sample
if do_classifier_free_guidance:
model_output_uncond , model_output_text = model_output.chunk(2 )
model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(model_output , dim=1 , keepdim=True )
model_output = self.truncate(model_output , truncation_rate )
# remove `log(0)`'s (`-inf`s)
model_output = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
sample = self.scheduler.step(model_output , timestep=t , sample=sample , generator=generator ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(i , t , sample )
embedding_channels = self.vqvae.config.vq_embed_dim
embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
embeddings = self.vqvae.quantize.get_codebook_entry(sample , shape=embeddings_shape )
image = self.vqvae.decode(embeddings , force_not_quantize=True ).sample
image = (image / 2 + 0.5).clamp(0 , 1 )
image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
image = self.numpy_to_pil(image )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image )
def truncate( self , log_p_x_0 , truncation_rate ) -> torch.FloatTensor:
'''simple docstring'''
sorted_log_p_x_0 , indices = torch.sort(log_p_x_0 , 1 , descending=True )
sorted_p_x_0 = torch.exp(sorted_log_p_x_0 )
keep_mask = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
all_true = torch.full_like(keep_mask[:, 0:1, :] , True )
keep_mask = torch.cat((all_true, keep_mask) , dim=1 )
keep_mask = keep_mask[:, :-1, :]
keep_mask = keep_mask.gather(1 , indices.argsort(1 ) )
rv = log_p_x_0.clone()
rv[~keep_mask] = -torch.inf # -inf = log(0)
return rv
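# Usage sketch (mirrors diffusers' VQDiffusionPipeline; checkpoint id illustrative):
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   image = pipe("teddy bear playing in the pool", num_inference_steps=100).images[0]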
| 348 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNet2DConditionModel,
UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class snake_case__ ( unittest.TestCase ):
def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def dummy_unet( self ):
torch.manual_seed(0 )
model = UNet2DModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
return model
@property
def dummy_unet_condition( self ):
torch.manual_seed(0 )
model = UNet2DConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , cross_attention_dim=10 , )
return model
@property
def dummy_vqvae_and_unet( self ):
torch.manual_seed(0 )
vqvae = AutoencoderKL(
sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , )
unet = UNet2DModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
return vqvae, unet
@slow
def test_audio_diffusion( self ):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
mel = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
scheduler = DDPMScheduler()
pipe = AudioDiffusionPipeline(vqvae=None , unet=self.dummy_unet , mel=mel , scheduler=scheduler )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
generator = torch.Generator(device=device ).manual_seed(42 )
output = pipe(generator=generator , steps=4 )
audio = output.audios[0]
image = output.images[0]
generator = torch.Generator(device=device ).manual_seed(42 )
output = pipe(generator=generator , steps=4 , return_dict=False )
image_from_tuple = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
image_slice = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:10]
expected_slice = np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
mel = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
scheduler = DDIMScheduler()
dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
pipe = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=mel , scheduler=scheduler )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
np.random.seed(0 )
raw_audio = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
generator = torch.Generator(device=device ).manual_seed(42 )
output = pipe(raw_audio=raw_audio , generator=generator , start_step=5 , steps=10 )
image = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
image_slice = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
expected_slice = np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
dummy_unet_condition = self.dummy_unet_condition
pipe = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_unet_condition , mel=mel , scheduler=scheduler )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
np.random.seed(0 )
encoding = torch.rand((1, 1, 10) )
output = pipe(generator=generator , encoding=encoding )
image = output.images[0]
image_slice = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
expected_slice = np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_audio_diffusion( self ):
device = torch_device
pipe = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
generator = torch.Generator(device=device ).manual_seed(42 )
output = pipe(generator=generator )
audio = output.audios[0]
image = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
image_slice = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
expected_slice = np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 364 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main ( ):
"""simple docstring"""
parser = argparse.ArgumentParser(
description="""Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).""" )
parser.add_argument("""--file_path""", type=str, default="""data/dump.txt""", help="""The path to the data.""" )
parser.add_argument("""--tokenizer_type""", type=str, default="""bert""", choices=["""bert""", """roberta""", """gpt2"""] )
parser.add_argument("""--tokenizer_name""", type=str, default="""bert-base-uncased""", help="""The tokenizer to use.""" )
parser.add_argument("""--dump_file""", type=str, default="""data/dump""", help="""The dump file prefix.""" )
args = parser.parse_args()
logger.info(f'Loading Tokenizer ({args.tokenizer_name})' )
if args.tokenizer_type == "bert":
tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
bos = tokenizer.special_tokens_map["""cls_token"""] # `[CLS]`
sep = tokenizer.special_tokens_map["""sep_token"""] # `[SEP]`
elif args.tokenizer_type == "roberta":
tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
bos = tokenizer.special_tokens_map["""cls_token"""] # `<s>`
sep = tokenizer.special_tokens_map["""sep_token"""] # `</s>`
elif args.tokenizer_type == "gpt2":
tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name )
bos = tokenizer.special_tokens_map["""bos_token"""] # `<|endoftext|>`
sep = tokenizer.special_tokens_map["""eos_token"""] # `<|endoftext|>`
logger.info(f'Loading text from {args.file_path}' )
with open(args.file_path, """r""", encoding="""utf8""" ) as fp:
data = fp.readlines()
logger.info("""Start encoding""" )
logger.info(f'{len(data )} examples to process.' )
rslt = []
iter = 0
interval = 10000
start = time.time()
for text in data:
text = f'{bos} {text.strip()} {sep}'
token_ids = tokenizer.encode(text, add_special_tokens=False )
rslt.append(token_ids )
iter += 1
if iter % interval == 0:
end = time.time()
logger.info(f'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' )
start = time.time()
logger.info("""Finished binarization""" )
logger.info(f'{len(data )} examples processed.' )
dp_file = f'{args.dump_file}.{args.tokenizer_name}.pickle'
vocab_size = tokenizer.vocab_size
if vocab_size < (1 << 16):
rslt_ = [np.uint16(d ) for d in rslt]
else:
rslt_ = [np.int32(d ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f'Dump to {dp_file}' )
with open(dp_file, """wb""" ) as handle:
pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL )
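# Design note: token ids are stored as np.uint16 whenever the vocabulary fits in
# 16 bits (vocab_size < 1 << 16), halving the pickled dataset size versus int32.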
if __name__ == "__main__":
main()
| 138 | 0 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint (checkpoint , config ):
"""simple docstring"""
vae_state_dict = checkpoint
new_checkpoint = {}
new_checkpoint['''encoder.conv_in.weight'''] = vae_state_dict['''encoder.conv_in.weight''']
new_checkpoint['''encoder.conv_in.bias'''] = vae_state_dict['''encoder.conv_in.bias''']
new_checkpoint['''encoder.conv_out.weight'''] = vae_state_dict['''encoder.conv_out.weight''']
new_checkpoint['''encoder.conv_out.bias'''] = vae_state_dict['''encoder.conv_out.bias''']
new_checkpoint['''encoder.conv_norm_out.weight'''] = vae_state_dict['''encoder.norm_out.weight''']
new_checkpoint['''encoder.conv_norm_out.bias'''] = vae_state_dict['''encoder.norm_out.bias''']
new_checkpoint['''decoder.conv_in.weight'''] = vae_state_dict['''decoder.conv_in.weight''']
new_checkpoint['''decoder.conv_in.bias'''] = vae_state_dict['''decoder.conv_in.bias''']
new_checkpoint['''decoder.conv_out.weight'''] = vae_state_dict['''decoder.conv_out.weight''']
new_checkpoint['''decoder.conv_out.bias'''] = vae_state_dict['''decoder.conv_out.bias''']
new_checkpoint['''decoder.conv_norm_out.weight'''] = vae_state_dict['''decoder.norm_out.weight''']
new_checkpoint['''decoder.conv_norm_out.bias'''] = vae_state_dict['''decoder.norm_out.bias''']
new_checkpoint['''quant_conv.weight'''] = vae_state_dict['''quant_conv.weight''']
new_checkpoint['''quant_conv.bias'''] = vae_state_dict['''quant_conv.bias''']
new_checkpoint['''post_quant_conv.weight'''] = vae_state_dict['''post_quant_conv.weight''']
new_checkpoint['''post_quant_conv.bias'''] = vae_state_dict['''post_quant_conv.bias''']
# Retrieves the keys for the encoder down blocks only
num_down_blocks = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} )
down_blocks = {
layer_id: [key for key in vae_state_dict if f'down.{layer_id}' in key] for layer_id in range(num_down_blocks )
}
# Retrieves the keys for the decoder up blocks only
num_up_blocks = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} )
up_blocks = {
layer_id: [key for key in vae_state_dict if f'up.{layer_id}' in key] for layer_id in range(num_up_blocks )
}
for i in range(num_down_blocks ):
resnets = [key for key in down_blocks[i] if f'down.{i}' in key and f'down.{i}.downsample' not in key]
if f'encoder.down.{i}.downsample.conv.weight' in vae_state_dict:
new_checkpoint[f'encoder.down_blocks.{i}.downsamplers.0.conv.weight'] = vae_state_dict.pop(
f'encoder.down.{i}.downsample.conv.weight' )
new_checkpoint[f'encoder.down_blocks.{i}.downsamplers.0.conv.bias'] = vae_state_dict.pop(
f'encoder.down.{i}.downsample.conv.bias' )
paths = renew_vae_resnet_paths(resnets )
meta_path = {'''old''': f'down.{i}.block', '''new''': f'down_blocks.{i}.resnets'}
assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
mid_resnets = [key for key in vae_state_dict if '''encoder.mid.block''' in key]
num_mid_res_blocks = 2
for i in range(1 , num_mid_res_blocks + 1 ):
resnets = [key for key in mid_resnets if f'encoder.mid.block_{i}' in key]
paths = renew_vae_resnet_paths(resnets )
meta_path = {'''old''': f'mid.block_{i}', '''new''': f'mid_block.resnets.{i - 1}'}
assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
mid_attentions = [key for key in vae_state_dict if '''encoder.mid.attn''' in key]
paths = renew_vae_attention_paths(mid_attentions )
meta_path = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
conv_attn_to_linear(new_checkpoint )
for i in range(num_up_blocks ):
block_id = num_up_blocks - 1 - i
resnets = [
key for key in up_blocks[block_id] if f'up.{block_id}' in key and f'up.{block_id}.upsample' not in key
]
if f'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict:
new_checkpoint[f'decoder.up_blocks.{i}.upsamplers.0.conv.weight'] = vae_state_dict[
f'decoder.up.{block_id}.upsample.conv.weight'
]
new_checkpoint[f'decoder.up_blocks.{i}.upsamplers.0.conv.bias'] = vae_state_dict[
f'decoder.up.{block_id}.upsample.conv.bias'
]
paths = renew_vae_resnet_paths(resnets )
meta_path = {'''old''': f'up.{block_id}.block', '''new''': f'up_blocks.{i}.resnets'}
assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
mid_resnets = [key for key in vae_state_dict if '''decoder.mid.block''' in key]
num_mid_res_blocks = 2
for i in range(1 , num_mid_res_blocks + 1 ):
resnets = [key for key in mid_resnets if f'decoder.mid.block_{i}' in key]
paths = renew_vae_resnet_paths(resnets )
meta_path = {'''old''': f'mid.block_{i}', '''new''': f'mid_block.resnets.{i - 1}'}
assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
mid_attentions = [key for key in vae_state_dict if '''decoder.mid.attn''' in key]
paths = renew_vae_attention_paths(mid_attentions )
meta_path = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
conv_attn_to_linear(new_checkpoint )
return new_checkpoint
def _A (__a , __a , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = requests.get(
''' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' )
SCREAMING_SNAKE_CASE_ : Dict = io.BytesIO(r.content )
SCREAMING_SNAKE_CASE_ : Any = OmegaConf.load(__a )
SCREAMING_SNAKE_CASE_ : int = 5_12
SCREAMING_SNAKE_CASE_ : str = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if checkpoint_path.endswith('''safetensors''' ):
from safetensors import safe_open
SCREAMING_SNAKE_CASE_ : Dict = {}
with safe_open(__a , framework='''pt''' , device='''cpu''' ) as f:
for key in f.keys():
SCREAMING_SNAKE_CASE_ : Any = f.get_tensor(__a )
else:
SCREAMING_SNAKE_CASE_ : List[Any] = torch.load(__a , map_location=__a )['''state_dict''']
# Convert the VAE model.
SCREAMING_SNAKE_CASE_ : Any = create_vae_diffusers_config(__a , image_size=__a )
SCREAMING_SNAKE_CASE_ : int = custom_convert_ldm_vae_checkpoint(__a , __a )
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoencoderKL(**__a )
vae.load_state_dict(__a )
    vae.save_pretrained(__b )
if __name__ == "__main__":
UpperCAmelCase_ : int = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
UpperCAmelCase_ : Any = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
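# Usage sketch for the converter above (paths are illustrative):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae_diffusers
# The dumped directory can then be reloaded with AutoencoderKL.from_pretrained("./vae_diffusers").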
| 91 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
__UpperCAmelCase : Tuple = logging.get_logger(__name__)
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , *A : Union[str, Any] , **A : Optional[int] ):
        warnings.warn(
            """The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use PerceiverImageProcessor instead.""" , FutureWarning , )
super().__init__(*A , **A )
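# Migration sketch for the deprecation above; the checkpoint name is an assumption,
# any Perceiver vision checkpoint works:
#   from transformers import PerceiverImageProcessor
#   image_processor = PerceiverImageProcessor.from_pretrained("deepmind/vision-perceiver-conv")
#   inputs = image_processor(images=image, return_tensors="pt")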
| 111 | 0 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
    def __init__( self : List[str] , path_or_paths : Optional[NestedDataStructureLike[PathLike]] = None , split : Optional[NamedSplit] = None , features : Optional[Features] = None , cache_dir : str = None , keep_in_memory : bool = False , streaming : bool = False , num_proc : Optional[int] = None , **kwargs : str , )-> List[Any]:
        """simple docstring"""
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths , dict ) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs
@abstractmethod
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
"""simple docstring"""
pass
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
    def __init__( self : Dict , features : Optional[Features] = None , cache_dir : str = None , keep_in_memory : bool = False , streaming : bool = False , num_proc : Optional[int] = None , **kwargs : Optional[int] , )-> str:
        """simple docstring"""
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs
@abstractmethod
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[Dataset, IterableDataset]:
"""simple docstring"""
pass
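# A concrete reader only has to fill in `read()`. Minimal sketch (the builder-based body
# below is an assumption for illustration, not the exact `datasets` internals):
#
#   class CsvDatasetReader(AbstractDatasetReader):  # i.e. the first base class above
#       def read(self):
#           builder = Csv(path_or_paths=self.path_or_paths, features=self.features, cache_dir=self.cache_dir)
#           if self.streaming:
#               return builder.as_streaming_dataset(split=self.split)
#           builder.download_and_prepare()
#           return builder.as_dataset(split=self.split, in_memory=self.keep_in_memory)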
| 367 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"""vocab_file""": """spiece.model"""}
lowercase_ = {
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
lowercase_ = {
"""AI-Sweden/gpt-sw3-126m""": 2_048,
"""AI-Sweden/gpt-sw3-350m""": 2_048,
"""AI-Sweden/gpt-sw3-1.6b""": 2_048,
"""AI-Sweden/gpt-sw3-6.7b""": 2_048,
"""AI-Sweden/gpt-sw3-20b""": 2_048,
}
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : List[str] = VOCAB_FILES_NAMES
_UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Any = ['input_ids', 'attention_mask']
    def __init__( self : Optional[Any] , vocab_file , do_lower_case=False , remove_space=False , keep_accents=False , pad_token=None , unk_token=None , eos_token=None , bos_token=None , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , )-> None:
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get('name_or_path' )
        if name_or_path is None:
            logger.warning(
                'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'
                ' if you are testing the model, this can safely be ignored' )
            name_or_path = 'None'
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = '<|endoftext|>' if eos_token is None else eos_token
        unk_token = '<unk>' if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = '<pad>' if pad_token is None else pad_token
            bos_token = '<s>' if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"""[{''.join(map(chr , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]""" )
    def __getstate__( self : Any )-> str:
        """simple docstring"""
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self : int , d : Optional[Any] )-> int:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> int:
"""simple docstring"""
return len(self.sp_model )
    def SCREAMING_SNAKE_CASE_ ( self : Tuple , text : str )-> str:
        """simple docstring"""
        text = self.non_printing_characters_re.sub('' , text )
        # Normalize whitespaces
        text = ''.join([char if char not in self.whitespaces else ' ' for char in text] )
        # NFC Unicode normalization
        text = unicodedata.normalize('NFC' , text )
        return text
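    # e.g. preprocess_text("Hej\u200bdå") -> "Hejdå": the zero-width space (code point 8_203)
    # is stripped by the regex above before the surviving text is NFC-normalized.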
    def SCREAMING_SNAKE_CASE_ ( self : Any , text : str , **kwargs )-> List[str]:
        """simple docstring"""
        text = self.preprocess_text(text )
        return self.sp_model.encode(text , out_type=str )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : str )-> int:
"""simple docstring"""
return self.sp_model.PieceToId(a )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : int )-> str:
"""simple docstring"""
return self.sp_model.IdToPiece(a )
@staticmethod
    def SCREAMING_SNAKE_CASE_ ( out_string : str )-> str:
"""simple docstring"""
return out_string
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , tokens : List[str] )-> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string
return out_string
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict[str, int]:
"""simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def SCREAMING_SNAKE_CASE_ ( self : Any , save_directory : str , filename_prefix : Optional[str] = None )-> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , text : Union[str, List[str]] , return_tensors : Union[str, bool] = False )-> Union[List[int], List[List[int]], "torch.Tensor"]:
        """simple docstring"""
        if isinstance(text , str ):
            text = self.preprocess_text(text )
            token_ids = self.sp_model.encode(text )
        else:
            text = [self.preprocess_text(t ) for t in text]
            token_ids = self.sp_model.encode(text )
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids )
        return token_ids
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Union[int, List[int]] )-> str:
"""simple docstring"""
return self.sp_model.decode(a )
def SCREAMING_SNAKE_CASE_ ( self : Any , a : "Conversation" )-> List[int]:
"""simple docstring"""
lowercase__ = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
lowercase__ = (
f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(a ) + f"""{self.bos_token}Bot:"""
)
return self.encode(text=a )
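    # For reference, the prompt builder above flattens a two-turn conversation into a
    # single string before encoding, shaped like (with the default special tokens):
    #   "<|endoftext|><s>User: Hi<s>Bot: Hello<s>Bot:"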
| 269 | 0 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def UpperCamelCase ( key , default=False ):
    '''simple docstring'''
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'If set, {key} must be yes or no.' )
    return _value
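# Usage sketch (the environment variable name is illustrative); `strtobool` accepts the
# usual spellings such as "yes"/"no", "true"/"false" and "1"/"0":
#   _run_custom_tests = parse_flag_from_env("RUN_CUSTOM_TESTS", default=False)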
lowercase__ :Tuple = parse_flag_from_env("RUN_SLOW", default=False)
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skip('''Test was skipped''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(lowerCAmelCase__ )
def UpperCamelCase ( test_case=None , version=None ):
    '''simple docstring'''
    if test_case is None:
        return partial(UpperCamelCase , version=version )
    return unittest.skipUnless(is_torch_version('''>=''' , version ) , f'test requires torch version >= {version}' )(test_case )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(lowerCAmelCase__ )
lowercase__ :Union[str, Any] = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(lowerCAmelCase__ )
class lowercase ( unittest.TestCase ):
lowercase_ : Dict =True
@classmethod
def A__ ( cls):
lowercase = tempfile.mkdtemp()
@classmethod
def A__ ( cls):
if os.path.exists(cls.tmpdir):
shutil.rmtree(cls.tmpdir)
def A__ ( self):
if self.clear_on_setup:
for path in Path(self.tmpdir).glob('''**/*'''):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(A__)
class lowercase ( unittest.TestCase ):
def A__ ( self):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowercase ( unittest.TestCase ):
    def A__ ( self ,mocks):
        self.mocks = mocks if isinstance(mocks ,(tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def UpperCamelCase ( tensor ):
    '''simple docstring'''
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
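# Sketch of the check above: `gather` stacks the per-process copies along dim 0, so with
# two processes each holding tensor([1., 2.]), `tensors` has shape (2, 2) and every row
# must equal the local tensor for the function to return True.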
class lowercase :
    def __init__( self ,returncode ,stdout ,stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def UpperCamelCase ( stream , callback ):
    '''simple docstring'''
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def UpperCamelCase ( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ):
    '''simple docstring'''
    if echo:
        print('''\nRunning: ''' , ''' '''.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label="" ):
        line = line.decode('''utf-8''' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda line : tee(line , out , sys.stdout , label='''stdout:''' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda line : tee(line , err , sys.stderr , label='''stderr:''' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def UpperCamelCase ( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ):
    '''simple docstring'''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ''' '''.join(cmd )
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr )
        raise RuntimeError(
            f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
            f'The combined stderr from workers follows:\n{stderr}' )
    return result
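# Usage sketch for the synchronous wrapper above (upstream name `execute_subprocess_async`;
# the command is illustrative):
#   result = execute_subprocess_async(["accelerate", "launch", "examples/nlp_example.py"])
#   print(result.stdout)
# A non-zero return code surfaces as a RuntimeError carrying the collected stderr.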
class lowercase ( SCREAMING_SNAKE_CASE__ ):
pass
def UpperCamelCase ( command , return_stdout=False ):
    '''simple docstring'''
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , '''decode''' ):
                output = output.decode('''utf-8''' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f'Command `{" ".join(command )}` failed with the following error:\n\n{e.output.decode()}' ) from e
| 101 |
import requests
from bs4 import BeautifulSoup
def SCREAMING_SNAKE_CASE ( base_url , params ) -> str:
    """simple docstring"""
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , '''html.parser''' )
    div = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
    anchors = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
    return anchors[2].get_text()
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 30,
"""pages""": """3979-3990""",
"""year""": 2018,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
| 14 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : Dict = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    __SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 284 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
@dataclass
class lowercase_ ( __snake_case ):
_lowerCamelCase = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self , **lowercase_ ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
_snake_case : List[str] = deprecated_arg[3:]
_snake_case : Optional[int] = not kwargs.pop(lowercase_ )
logger.warning(
f"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"""
f""" {positive_arg}={kwargs[positive_arg]}""" )
_snake_case : Tuple = kwargs.pop("tpu_name" , self.tpu_name )
_snake_case : Any = kwargs.pop("device_idx" , self.device_idx )
_snake_case : List[str] = kwargs.pop("eager_mode" , self.eager_mode )
_snake_case : List[str] = kwargs.pop("use_xla" , self.use_xla )
super().__init__(**lowercase_ )
_lowerCamelCase = field(
default=__snake_case , metadata={'help': 'Name of TPU'} , )
_lowerCamelCase = field(
default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , )
_lowerCamelCase = field(default=__snake_case , metadata={'help': 'Benchmark models in eager model.'} )
_lowerCamelCase = field(
default=__snake_case , metadata={
'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'
} , )
@cached_property
def UpperCamelCase ( self ):
requires_backends(self , ["tf"] )
_snake_case : str = None
if self.tpu:
try:
if self.tpu_name:
_snake_case : Optional[Any] = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
_snake_case : List[str] = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
_snake_case : Union[str, Any] = None
return tpu
@cached_property
def UpperCamelCase ( self ):
requires_backends(self , ["tf"] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
_snake_case : List[str] = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" )
_snake_case : Any = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""" )
else:
tf.config.set_visible_devices([] , "GPU" ) # disable GPU
_snake_case : Any = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""" )
return strategy
@property
def UpperCamelCase ( self ):
requires_backends(self , ["tf"] )
return self._setup_tpu is not None
@property
def UpperCamelCase ( self ):
requires_backends(self , ["tf"] )
return self._setup_strategy
@property
def UpperCamelCase ( self ):
requires_backends(self , ["tf"] )
return tf.config.list_physical_devices("GPU" )
@property
def UpperCamelCase ( self ):
requires_backends(self , ["tf"] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def UpperCamelCase ( self ):
        return self.n_gpu > 0
| 284 | 1 |
import os
from collections.abc import Iterator
def lowerCamelCase_ ( _UpperCamelCase = "." ) -> Iterator[str]:
"""simple docstring"""
for dir_path, dir_names, filenames in os.walk(_UpperCamelCase ):
        dir_names[:] = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._''']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_UpperCamelCase )[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path , filename ).lstrip('''./''' )
def lowerCamelCase_ ( i ) -> str:
    """simple docstring"""
    return f'''{i * "  "}*''' if i else "\n##"
def lowerCamelCase_ ( old_path , new_path ) -> str:
    """simple docstring"""
    old_parts = old_path.split(os.sep )
    for i, new_part in enumerate(new_path.split(os.sep ) ):
        if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
            print(f'''{md_prefix(i )} {new_part.replace("_" , " " ).title()}''' )
    return new_path
def lowerCamelCase_ ( _UpperCamelCase = "." ) -> None:
"""simple docstring"""
snake_case_ : Union[str, Any] = ''''''
for filepath in sorted(good_file_paths(_UpperCamelCase ) ):
snake_case_ , snake_case_ : Dict = os.path.split(_UpperCamelCase )
if filepath != old_path:
snake_case_ : List[str] = print_path(_UpperCamelCase , _UpperCamelCase )
snake_case_ : str = (filepath.count(os.sep ) + 1) if filepath else 0
snake_case_ : int = f'''{filepath}/{filename}'''.replace(''' ''' , '''%20''' )
snake_case_ : List[Any] = os.path.splitext(filename.replace('''_''' , ''' ''' ).title() )[0]
print(f'''{md_prefix(_UpperCamelCase )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md('''.''')
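# For a tree containing sorts/quick_sort.py, the emitted markdown looks roughly like:
#
#   ## Sorts
#     * [Quick Sort](sorts/quick_sort.py)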
| 279 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( _a, unittest.TestCase ):
lowerCamelCase_ : Optional[int] = SpeechTaTokenizer
lowerCamelCase_ : int = False
lowerCamelCase_ : Dict = True
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(lowerCAmelCase_ )
        mask_token = AddedToken('''<mask>''' , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase (self , __magic_name__ ) -> Dict:
'''simple docstring'''
        input_text = '''this is a test'''
        output_text = '''this is a test'''
        return input_text, output_text
    def lowerCamelCase (self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ) -> List[Any]:
        '''simple docstring'''
        text , ids = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
        token = '''<pad>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-4] , '''œ''' )
self.assertEqual(vocab_keys[-2] , '''<mask>''' )
self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' )
        self.assertEqual(len(vocab_keys ) , 81 )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : int = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_ : int = tokenizer.vocab_size
snake_case_ : Optional[Any] = len(__magic_name__ )
self.assertNotEqual(__magic_name__ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
snake_case_ : List[Any] = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
snake_case_ : List[Any] = tokenizer.add_tokens(__magic_name__ )
snake_case_ : Dict = tokenizer.vocab_size
snake_case_ : Optional[Any] = len(__magic_name__ )
self.assertNotEqual(__magic_name__ , 0 )
self.assertEqual(__magic_name__ , __magic_name__ )
self.assertEqual(__magic_name__ , len(__magic_name__ ) )
self.assertEqual(__magic_name__ , all_size + len(__magic_name__ ) )
snake_case_ : Union[str, Any] = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__magic_name__ )
self.assertGreaterEqual(len(__magic_name__ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
snake_case_ : Union[str, Any] = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
snake_case_ : List[str] = tokenizer.add_special_tokens(__magic_name__ )
snake_case_ : Dict = tokenizer.vocab_size
snake_case_ : Dict = len(__magic_name__ )
self.assertNotEqual(__magic_name__ , 0 )
self.assertEqual(__magic_name__ , __magic_name__ )
self.assertEqual(__magic_name__ , len(__magic_name__ ) )
self.assertEqual(__magic_name__ , all_size_a + len(__magic_name__ ) )
snake_case_ : Tuple = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__magic_name__ )
self.assertGreaterEqual(len(__magic_name__ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
pass
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Dict = self.get_tokenizer()
snake_case_ : Optional[Any] = tokenizer.tokenize('''This is a test''' )
# fmt: off
self.assertListEqual(__magic_name__ , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
snake_case_ : List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
snake_case_ : List[str] = tokenizer.convert_tokens_to_ids(__magic_name__ )
# fmt: off
self.assertListEqual(__magic_name__ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
snake_case_ : int = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
@slow
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : Tuple = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
snake_case_ : List[Any] = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=__magic_name__ , )
| 279 | 1 |
'''simple docstring'''
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class UpperCamelCase :
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any]=1_3 , UpperCAmelCase_ : int=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Any=9_9 , UpperCAmelCase_ : List[Any]=6_4 , UpperCAmelCase_ : Any=3_2 , UpperCAmelCase_ : Optional[Any]=5 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Optional[int]=3_7 , UpperCAmelCase_ : Optional[Any]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : List[Any]=5_1_2 , UpperCAmelCase_ : Union[str, Any]=1_6 , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : Any=None , ):
"""simple docstring"""
a : str = parent
a : Any = batch_size
a : Optional[int] = seq_length
a : int = is_training
a : Union[str, Any] = use_input_mask
a : Optional[Any] = use_token_type_ids
a : Optional[Any] = use_labels
a : Optional[Any] = vocab_size
a : Optional[Any] = hidden_size
a : int = embedding_size
a : Dict = num_hidden_layers
a : Dict = num_attention_heads
a : List[Any] = intermediate_size
a : str = hidden_act
a : str = hidden_dropout_prob
a : Optional[Any] = attention_probs_dropout_prob
a : Any = max_position_embeddings
a : Any = type_vocab_size
a : Optional[int] = type_sequence_label_size
a : Optional[int] = initializer_range
a : Optional[int] = num_labels
a : int = num_choices
a : Optional[int] = scope
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a : Tuple = None
if self.use_input_mask:
a : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
a : Optional[int] = None
if self.use_token_type_ids:
a : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a : Union[str, Any] = None
a : Any = None
a : Union[str, Any] = None
if self.use_labels:
a : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a : Optional[int] = ids_tensor([self.batch_size] , self.num_choices)
a : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : List[Any] = MobileBertModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_)
a : Tuple = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_)
a : List[str] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : Dict = MobileBertForMaskedLM(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str):
"""simple docstring"""
a : Optional[int] = MobileBertForNextSentencePrediction(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Dict = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple):
"""simple docstring"""
a : str = MobileBertForPreTraining(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Any = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , next_sentence_label=UpperCAmelCase_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
a : Union[str, Any] = MobileBertForQuestionAnswering(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Any = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
a : str = self.num_labels
a : Tuple = MobileBertForSequenceClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str]):
"""simple docstring"""
a : str = self.num_labels
a : Union[str, Any] = MobileBertForTokenClassification(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Dict = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : Optional[int] = self.num_choices
a : int = MobileBertForMultipleChoice(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : str = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : str = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Tuple = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Tuple = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
A : Any = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A : Tuple = True
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any=False):
"""simple docstring"""
a : Optional[int] = super()._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
if return_labels:
if model_class in get_values(UpperCAmelCase_):
a : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase_)
a : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : List[str] = MobileBertModelTester(self)
a : Optional[Any] = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=3_7)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] ) -> List[str]:
"""simple docstring"""
    return torch.tensor(
        snake_case , dtype=torch.long , device=torch_device , )
TOLERANCE = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
        model = MobileBertModel.from_pretrained('google/mobilebert-uncased').to(torch_device)
        input_ids = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 5_1_2))
        self.assertEqual(output.shape , expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4_736_526e07, 8.2_691_656e04, 1.6_521_838e05],
                    [-5.7_541_704e-01, 3.9_056_022e00, 4.4_011_507e00],
                    [2.6_047_359e00, 1.5_677_652e00, -1.7_324_188e-01],
                ]
            ] , device=torch_device , )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound)
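        # Numeric aside: with TOLERANCE = 1e-3, the ratio check above requires each compared
        # entry to match its expected value to within roughly 0.1% relative error, independent
        # of the entry's absolute magnitude.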
| 368 |
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
UpperCamelCase : List[str] = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """simple docstring"""
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
    max_seq_length: int = field(
        default=128 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
    def __post_init__( self ):
        """simple docstring"""
        self.task_name = self.task_name.lower()
class Split(Enum ):
    """simple docstring"""
    train = "train"
    dev = "dev"
    test = "test"
class GlueDataset(Dataset ):
    """simple docstring"""
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__( self , args: GlueDataTrainingArguments , tokenizer: PreTrainedTokenizerBase , limit_length: Optional[int] = None , mode: Union[str, Split] = Split.train , cache_dir: Optional[str] = None , ):
        """simple docstring"""
        warnings.warn(
            'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
            'library. You can have a look at this example script for pointers: '
            'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , FutureWarning , )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode , str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name')
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start)
            else:
                logger.info(f"""Creating features from dataset file at {args.data_dir}""")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples , tokenizer , max_length=args.max_seq_length , label_list=label_list , output_mode=self.output_mode , )
                start = time.time()
                torch.save(self.features , cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""")
def __len__( self : Tuple):
"""simple docstring"""
return len(self.features)
    def __getitem__( self , i ) -> InputFeatures:
        """simple docstring"""
        return self.features[i]
    def get_labels( self ):
        """simple docstring"""
        return self.label_list
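# A minimal sketch of the lock-guarded cache pattern used in __init__ above; it
# assumes only filelock and torch, and `build_features`/`cache_path` are
# hypothetical stand-ins.
def load_or_build(cache_path, build_features, overwrite=False):
    # The lock ensures a single process builds the cache while the others wait,
    # then read the finished file.
    with FileLock(cache_path + '.lock'):
        if os.path.exists(cache_path) and not overwrite:
            return torch.load(cache_path)
        features = build_features()
        torch.save(features, cache_path)
        return features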
| 345 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest( unittest.TestCase):
    def setUp( self ) -> Union[str, Any]:
        self.checkpoint = 'ZinengTang/tvlt-base'
        self.tmpdirname = tempfile.mkdtemp()
    def get_image_processor( self , **kwargs ) -> Any:
        return TvltImageProcessor.from_pretrained(self.checkpoint , **kwargs )
    def get_feature_extractor( self , **kwargs ) -> Union[str, Any]:
        return TvltFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ) -> int:
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default( self ) -> List[Any]:
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor , TvltFeatureExtractor )
        self.assertIsInstance(processor.image_processor , TvltImageProcessor )
    def test_feature_extractor( self ) -> Optional[Any]:
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([1_2_0_0_0] )
        audio_dict = feature_extractor(audio , return_tensors='np' )
        input_processor = processor(audio=audio , return_tensors='np' )
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_image_processor( self ) -> Optional[Any]:
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        images = np.ones([3, 2_2_4, 2_2_4] )
        image_dict = image_processor(images , return_tensors='np' )
        input_processor = processor(images=images , return_tensors='np' )
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_processor( self ) -> Union[str, Any]:
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([1_2_0_0_0] )
        images = np.ones([3, 2_2_4, 2_2_4] )
        inputs = processor(audio=audio , images=images )
        self.assertListEqual(list(inputs.keys() ) , ['audio_values', 'audio_mask', 'pixel_values', 'pixel_mask'] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_model_input_names( self ) -> Optional[Any]:
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='`processor` and `image_processor`+`feature_extractor` model input names do not match' , )
| 77 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , projection_dim=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        config = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDPRContextEncoder(config=config )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
    def create_and_check_dpr_question_encoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDPRQuestionEncoder(config=config )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
    def create_and_check_dpr_reader( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDPRReader(config=config )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowerCAmelCase_ : int = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ : str = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
lowerCAmelCase_ : Optional[int] = False
lowerCAmelCase_ : Union[str, Any] = False
lowerCAmelCase_ : str = False
lowerCAmelCase_ : List[Any] = False
lowerCAmelCase_ : Dict = False
    def setUp( self ):
        self.model_tester = TFDPRModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPRConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_dpr_context_encoder_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs )
    def test_dpr_question_encoder_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs )
    def test_dpr_reader_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class TFDPRModelIntegrationTest( unittest.TestCase ):
    @slow
    def test_inference_no_head( self ):
        model = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' )
        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
0.03_23_62_53,
0.12_75_33_35,
0.16_81_85_09,
0.00_27_97_86,
0.3_89_69_33,
0.24_26_49_45,
0.2_17_89_71,
-0.02_33_52_27,
-0.08_48_19_59,
-0.14_32_41_17,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
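# A minimal sketch of how DPR embeddings like the one checked above are typically
# scored: passages are ranked by the dot product between question and passage
# vectors. The random tensors stand in for real encoder outputs.
def dpr_score(question_embedding, passage_embeddings):
    return tf.reduce_sum(question_embedding * passage_embeddings, axis=-1)

q = tf.random.normal((1, 768))  # stand-in for question_encoder(input_ids)[0]
p = tf.random.normal((4, 768))  # stand-in for context_encoder(passage_ids)[0]
scores = dpr_score(q, p)        # shape (4,): higher means more relevant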
| 138 | 0 |
'''simple docstring'''
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset , args: Dict[str, str] ) -> None:
    """simple docstring"""
    log_outputs = args.log_outputs
    dataset_id = """_""".join(args.dataset.split("""/""" ) + [args.config, args.split] )
    # load metric
    wer = load_metric("""wer""" )
    cer = load_metric("""cer""" )
    # compute metrics
    wer_result = wer.compute(references=result["""target"""] , predictions=result["""prediction"""] )
    cer_result = cer.compute(references=result["""target"""] , predictions=result["""prediction"""] )
    # print & log results
    result_str = f'WER: {wer_result}\nCER: {cer_result}'
    print(result_str )
    with open(f'{dataset_id}_eval_results.txt' , """w""" ) as f:
        f.write(result_str )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f'log_{dataset_id}_predictions.txt'
        target_file = f'log_{dataset_id}_targets.txt'
        with open(pred_file , """w""" ) as p, open(target_file , """w""" ) as t:
            # mapping function to write output
            def write_to_file(batch , i ):
                p.write(f'{i}' + """\n""" )
                p.write(batch["""prediction"""] + """\n""" )
                t.write(f'{i}' + """\n""" )
                t.write(batch["""target"""] + """\n""" )
            result.map(write_to_file , with_indices=True )
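# For orientation (a hedged aside, not part of the original script): word error rate
# is (substitutions + deletions + insertions) / reference word count, e.g.
#   wer = load_metric("wer")
#   wer.compute(references=["hello world"], predictions=["hello there world"])  # -> 0.5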
def normalize_text(text: str ) -> str:
    """simple docstring"""
    chars_to_ignore_regex = """[,?.!\-\;\:\"“%‘”�—’…–]""" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , """""" , text.lower() )
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["""\n\n""", """\n""", """   """, """  """]
    for t in token_sequences_to_ignore:
        text = """ """.join(text.split(t ) )
    return text
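# A quick illustrative check of normalize_text; the sample sentence is made up, but
# the expected output follows directly from the regex and split rules above.
assert normalize_text("Hello, World!\n\nSECOND line.") == "hello world second line"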
def main(args ) -> None:
    """simple docstring"""
    # load dataset
    dataset = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=True )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id )
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column("""audio""" , Audio(sampling_rate=sampling_rate ) )
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("""automatic-speech-recognition""" , model=args.model_id , device=args.device )
    # map function to decode audio
    def map_to_pred(batch ):
        prediction = asr(
            batch["""audio"""]["""array"""] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        batch["""prediction"""] = prediction["""text"""]
        batch["""target"""] = normalize_text(batch["""sentence"""] )
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(result , args )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
UpperCAmelCase = parser.parse_args()
    main(args)
| 187 |
'''simple docstring'''
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int] , burst_time: list[int] , no_of_processes: int ) -> list[int]:
    """simple docstring"""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    complete = 0
    increment_time = 0
    minm = 999_999_999
    short = 0
    check = False
    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes ):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time
            turn_around = finish_time - arrival_time[short]
            waiting_time[short] = turn_around - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int] , no_of_processes: int , waiting_time: list[int] ) -> list[int]:
    """simple docstring"""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
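# A small worked example, computed by hand under the functions above: with
# arrival_time=[0, 1] and burst_time=[3, 1], process 2 preempts process 1 at t=1, so
#   calculate_waitingtime([0, 1], [3, 1], 2) == [1, 0]
#   calculate_turnaroundtime([3, 1], 2, [1, 0]) == [4, 1]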
def calculate_average_times(waiting_time: list[int] , turn_around_time: list[int] , no_of_processes: int ) -> None:
    """simple docstring"""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes ):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f'Average waiting time = {total_waiting_time / no_of_processes:.5f}' )
    print("""Average turn around time =""" , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
    print('Enter how many process you want to analyze')
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))
    for i in range(no_of_processes):
        print('Enter the arrival time and burst time for process:--' + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)
    calculate_average_times(waiting_time, turn_around_time, no_of_processes)
    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            'Process',
            'BurstTime',
            'ArrivalTime',
            'WaitingTime',
            'TurnAroundTime',
        ],
    )
    # Printing the dataFrame
    pd.set_option('display.max_rows', fcfs.shape[0] + 1)
    print(fcfs)
| 187 | 1 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig ):
    '''simple docstring'''
    model_type = "new-model"
if is_tf_available():
    class TFNewModel(TFBertModel ):
        '''simple docstring'''
        config_class = NewModelConfig
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_model_from_pretrained(self) ->Union[str, Any]:
        '''simple docstring'''
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config , BertConfig)
        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model , TFBertModel)
    @slow
    def test_model_for_pretraining_from_pretrained(self) ->List[Any]:
        '''simple docstring'''
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config , BertConfig)
        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model , TFBertForPreTraining)
@slow
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[Any]:
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__: str =AutoConfig.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: List[str] =TFAutoModelForCausalLM.from_pretrained(UpperCAmelCase_)
lowerCamelCase__ , lowerCamelCase__: Optional[int] =TFAutoModelForCausalLM.from_pretrained(UpperCAmelCase_ , output_loading_info=UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Dict:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__: List[Any] =AutoConfig.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Any =TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ (self : Dict) ->int:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__: Optional[int] =AutoConfig.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Dict =TFAutoModelForMaskedLM.from_pretrained(UpperCAmelCase_)
lowerCamelCase__ , lowerCamelCase__: List[str] =TFAutoModelForMaskedLM.from_pretrained(UpperCAmelCase_ , output_loading_info=UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Dict:
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__: str =AutoConfig.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: str =TFAutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase_)
lowerCamelCase__ , lowerCamelCase__: List[str] =TFAutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase_ , output_loading_info=UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ (self : str) ->Any:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowerCamelCase__: Dict =AutoConfig.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Dict =TFAutoModelForSequenceClassification.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Any:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowerCamelCase__: str =AutoConfig.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Tuple =TFAutoModelForQuestionAnswering.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
@slow
@require_tensorflow_probability
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->List[str]:
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
lowerCamelCase__: Tuple =AutoConfig.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =TFAutoModelForTableQuestionAnswering.from_pretrained(UpperCAmelCase_)
lowerCamelCase__ , lowerCamelCase__: List[str] =TFAutoModelForTableQuestionAnswering.from_pretrained(
UpperCAmelCase_ , output_loading_info=UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
    def test_from_pretrained_identifier(self) ->int:
        '''simple docstring'''
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model , TFBertForMaskedLM)
        self.assertEqual(model.num_parameters() , 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True) , 14_410)
    def test_from_identifier_from_model_type(self) ->Any:
        '''simple docstring'''
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model , TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters() , 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True) , 14_410)
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->str:
'''simple docstring'''
lowerCamelCase__: Tuple =TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: str =copy.deepcopy(model.config)
lowerCamelCase__: Dict =["FunnelBaseModel"]
lowerCamelCase__: List[Any] =TFAutoModel.from_config(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(UpperCAmelCase_)
lowerCamelCase__: List[Any] =TFAutoModel.from_pretrained(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->int:
'''simple docstring'''
try:
AutoConfig.register("new-model" , UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =[
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__):
# Wrong config class will raise an error
with self.assertRaises(UpperCAmelCase_):
auto_class.register(UpperCAmelCase_ , UpperCAmelCase_)
auto_class.register(UpperCAmelCase_ , UpperCAmelCase_)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase_):
auto_class.register(UpperCAmelCase_ , UpperCAmelCase_)
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCamelCase__: Tuple =BertModelTester(self).get_config()
lowerCamelCase__: List[str] =NewModelConfig(**tiny_config.to_dict())
lowerCamelCase__: List[Any] =auto_class.from_config(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(UpperCAmelCase_)
lowerCamelCase__: Tuple =auto_class.from_pretrained(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self) ->int:
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , "bert-base is not a local folder and is not a valid model identifier"):
            model = TFAutoModel.from_pretrained("bert-base")
    def test_revision_not_found(self) ->str:
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision="aaaaaa")
    def test_model_file_not_found(self) ->str:
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ):
            model = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")
    def test_model_from_pt_suggestion(self) ->Dict:
        '''simple docstring'''
        with self.assertRaisesRegex(EnvironmentError , "Use `from_pt=True` to load this model"):
            model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
def SCREAMING_SNAKE_CASE_ (self : str) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Dict =TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
with RequestCounter() as counter:
lowerCamelCase__: List[str] =TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
# With a sharded checkpoint
lowerCamelCase__: Optional[int] =TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
with RequestCounter() as counter:
lowerCamelCase__: List[Any] =TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
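# A minimal sketch of the registration pattern exercised in the test above, assuming
# the NewModelConfig/TFNewModel pair defined at the top of this file; "new-model" must
# not collide with a model type transformers already knows about.
#   AutoConfig.register("new-model", NewModelConfig)
#   TFAutoModel.register(NewModelConfig, TFNewModel)
#   model = TFAutoModel.from_config(NewModelConfig())  # now resolves through the Auto API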
| 10 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    '''simple docstring'''
    def __init__(self) -> None:
        """simple docstring"""
        self.process = psutil.Process()
        self.peak_monitoring = False
    def peak_monitor(self) -> None:
        """simple docstring"""
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss , self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break
    def start(self) -> None:
        """simple docstring"""
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()
    def stop(self) -> int:
        """simple docstring"""
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measure() -> dict:
    # Time
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures
def end_measure(start_measures ) -> dict:
    # Time
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[F"""{i}-peak"""] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures
def log_measures(measures , description ) -> None:
    print(F"""{description}:""" )
    print(F"""- Time: {measures['time']:.2f}s""" )
    for i in range(torch.cuda.device_count() ):
        print(F"""- GPU {i} allocated: {measures[str(i)]:.2f}MiB""" )
        peak = measures[F"""{i}-peak"""]
        print(F"""- GPU {i} peak: {peak:.2f}MiB""" )
    print(F"""- CPU RAM allocated: {measures['cpu']:.2f}MiB""" )
    print(F"""- CPU RAM peak: {measures['cpu-peak']:.2f}MiB""" )
| 269 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_time_series_transformer': [
        'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'TimeSeriesTransformerConfig',
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_time_series_transformer'] = [
        'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TimeSeriesTransformerForPrediction',
        'TimeSeriesTransformerModel',
        'TimeSeriesTransformerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
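# A minimal sketch of the same deferred-import idea using module-level __getattr__
# (PEP 562), assuming a hypothetical submodule `heavy_module` exposing `HeavyThing`:
#
# import importlib
#
# def __getattr__(name):
#     # Import the heavy submodule only when one of its names is first requested.
#     if name == "HeavyThing":
#         return importlib.import_module(".heavy_module", __name__).HeavyThing
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")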
| 358 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester :
    '''simple docstring'''
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = """gelu"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_mbart_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        model = TFMBartModel(config=config ).get_decoder()
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        head_mask = inputs_dict['''head_mask''']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> dict:
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFMBartModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowercase : Optional[Any] =(TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
lowercase : Optional[Any] =(TFMBartForConditionalGeneration,) if is_tf_available() else ()
lowercase : Optional[Any] =(
{
"""conversational""": TFMBartForConditionalGeneration,
"""feature-extraction""": TFMBartModel,
"""summarization""": TFMBartForConditionalGeneration,
"""text2text-generation""": TFMBartForConditionalGeneration,
"""translation""": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowercase : Optional[Any] =True
lowercase : Optional[Any] =False
lowercase : List[str] =False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False
    def setUp( self ):
        self.model_tester = TFMBartModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MBartConfig )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''
    src_text = [
        """ UN Chief Says There Is No Military Solution in Syria""",
    ]
    expected_text = [
        """Şeful ONU declară că nu există o soluţie militară în Siria""",
    ]
    model_name = """facebook/mbart-large-en-ro"""
    @cached_property
    def tokenizer( self ):
        return AutoTokenizer.from_pretrained(self.model_name )
    @cached_property
    def model( self ):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    def _assert_generated_batch_equal_expected( self , **tokenizer_kwargs ):
        generated_words = self.translate_src_text(**tokenizer_kwargs )
        self.assertListEqual(self.expected_text , generated_words )
    def translate_src_text( self , **tokenizer_kwargs ):
        model_inputs = self.tokenizer(self.src_text , **tokenizer_kwargs , return_tensors='''tf''' )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
        generated_words = self.tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
        return generated_words
    @slow
    def test_batch_generation_en_ro( self ):
        self._assert_generated_batch_equal_expected()
| 252 | 0 |
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # prune in place so os.walk skips hidden dirs and the scripts folder
        dir_names[:] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip('./')
def md_prefix(i: int) -> str:
    return f"""{i * "  "}*""" if i else "\n##"
def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"""{md_prefix(i)} {new_part.replace("_", " ").title()}""")
    return new_path
def print_directory_md(top_dir: str = ".") -> None:
    old_path = ''
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"""{filepath}/{filename}""".replace(' ', '%20')
        filename = os.path.splitext(filename.replace('_', ' ').title())[0]
        print(f"""{md_prefix(indent)} [{filename}]({url})""")
if __name__ == "__main__":
    print_directory_md('.')
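# An illustrative run, assuming a hypothetical tree containing only
# "src/data_structures/binary_tree.py"; print_directory_md would emit:
#
# ## Src
#   * Data Structures
#     * [Binary Tree](src/data_structures/binary_tree.py)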
| 284 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0' )
    if len(scores) == 0:
        raise ValueError('Scores cannot be empty' )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height), minimax(depth + 1, node_index * 2 + 1, False, scores, height), )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height), minimax(depth + 1, node_index * 2 + 1, True, scores, height), )
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 3_4423]
    height = math.log(len(scores), 2 )
    print('Optimal value : ', end='' )
    print(minimax(0, 0, True, scores, height) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
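# A hand-checked trace of the scores used in main above: at depth 2 the maximizer
# yields max(90, 23)=90, max(6, 33)=33, max(21, 65)=65, max(123, 34423)=34423; the
# minimizer reduces these to min(90, 33)=33 and min(65, 34423)=65; the root then
# prints max(33, 65) = 65.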
| 284 | 1 |
"""simple docstring"""
import math
import qiskit
def quantum_full_adder(input_1: int = 1 , input_2: int = 1 , carry_in: int = 1) -> qiskit.result.counts.Counts:
    if (
        isinstance(input_1 , str)
        or isinstance(input_2 , str)
        or isinstance(carry_in , str)
    ):
        raise TypeError('''inputs must be integers.''')
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('''inputs must be positive.''')
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('''inputs must be exact integers.''')
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less or equal to 2.''')
    # build registers
    qr = qiskit.QuantumRegister(4 , '''qr''')
    cr = qiskit.ClassicalRegister(2 , '''cr''')
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr)
    for i in range(0 , 3):
        if entry[i] == 2:
            quantum_circuit.h(i) # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i) # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i) # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3) # ccx = toffoli gate
    quantum_circuit.cx(0 , 1)
    quantum_circuit.ccx(1 , 2 , 3)
    quantum_circuit.cx(1 , 2)
    quantum_circuit.cx(0 , 1)
    quantum_circuit.measure([2, 3] , cr) # measure the last two qbits
    backend = qiskit.Aer.get_backend('''aer_simulator''')
    job = qiskit.execute(quantum_circuit , backend , shots=10_00)
    return job.result().get_counts(quantum_circuit)
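# A classical cross-check of the adder logic above (a sketch independent of qiskit):
# for definite inputs, the measured bitstring should concentrate on the carry and sum bits.
def classical_full_adder(a, b, carry_in):
    total = a + b + carry_in
    return total % 2, total // 2  # (sum bit, carry-out bit)

assert classical_full_adder(1, 1, 1) == (1, 1)  # matches '11' dominating the counts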
if __name__ == "__main__":
    print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
| 354 |
"""simple docstring"""
import comet # From: unbabel-comet
import torch
import datasets
A = datasets.logging.get_logger(__name__)
A = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
A = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
A = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence''' ),
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
'''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
] , )
    def _download_and_prepare( self , dl_manager ):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) )
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
    def _compute( self , sources , predictions , references , gpus=None , progress_bar=False ):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'''src''': sources, '''mt''': predictions, '''ref''': references}
        data = [dict(zip(data , t ) ) for t in zip(*data.values() )]
        mean_score, scores = self.scorer.predict(data , gpus=gpus , progress_bar=progress_bar )
        return {"mean_score": mean_score, "scores": scores}
| 188 | 0 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_SCREAMING_SNAKE_CASE = """true"""
def get_basic_setup(accelerator , num_samples=82 , batch_size=16 ) -> List[str]:
    '''simple docstring'''
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=num_samples )
    dataloader = DataLoader(dset , batch_size=batch_size )
    model.to(accelerator.device )
    ddp_model, dataloader = accelerator.prepare(ddp_model , dataloader )
    return model, ddp_model, dataloader
def lowercase( UpperCamelCase_ , UpperCamelCase_=False ) -> int:
'''simple docstring'''
UpperCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" )
UpperCamelCase = load_dataset("""glue""" , """mrpc""" , split="""validation""" )
def tokenize_function(UpperCamelCase_ ):
UpperCamelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_a , max_length=_a )
return outputs
with accelerator.main_process_first():
UpperCamelCase = dataset.map(
_a , batched=_a , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
UpperCamelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(UpperCamelCase_ ):
if use_longest:
return tokenizer.pad(_a , padding="""longest""" , return_tensors="""pt""" )
return tokenizer.pad(_a , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return DataLoader(_a , shuffle=_a , collate_fn=_a , batch_size=16 )
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Tuple:
'''simple docstring'''
UpperCamelCase = Accelerator(dispatch_batches=_a , split_batches=_a )
UpperCamelCase = get_dataloader(_a , not dispatch_batches )
UpperCamelCase = AutoModelForSequenceClassification.from_pretrained(
"""hf-internal-testing/mrpc-bert-base-cased""" , return_dict=_a )
UpperCamelCase = accelerator.prepare(_a , _a )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Dict:
'''simple docstring'''
UpperCamelCase = []
for batch in dataloader:
UpperCamelCase = batch.values()
with torch.no_grad():
UpperCamelCase = model(_a )
UpperCamelCase = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
UpperCamelCase = [], []
for logit, targ in logits_and_targets:
logits.append(_a )
targs.append(_a )
UpperCamelCase = torch.cat(_a ), torch.cat(_a )
return logits, targs
def lowercase( UpperCamelCase_ , UpperCamelCase_=82 , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=16 ) -> Dict:
'''simple docstring'''
UpperCamelCase = get_basic_setup(_a , _a , _a )
UpperCamelCase = generate_predictions(_a , _a , _a )
assert (
len(_a ) == num_samples
), f"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_a )}"""
def lowercase( UpperCamelCase_ = False , UpperCamelCase_ = False ) -> Tuple:
'''simple docstring'''
UpperCamelCase = evaluate.load("""glue""" , """mrpc""" )
UpperCamelCase = get_mrpc_setup(_a , _a )
# First do baseline
UpperCamelCase = setup["""no"""]
model.to(_a )
model.eval()
for batch in dataloader:
batch.to(_a )
with torch.inference_mode():
UpperCamelCase = model(**_a )
UpperCamelCase = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=_a , references=batch["""labels"""] )
UpperCamelCase = metric.compute()
# Then do distributed
UpperCamelCase = setup["""ddp"""]
model.eval()
for batch in dataloader:
with torch.inference_mode():
UpperCamelCase = model(**_a )
UpperCamelCase = outputs.logits.argmax(dim=-1 )
UpperCamelCase = batch["""labels"""]
UpperCamelCase = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=_a , references=_a )
UpperCamelCase = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def lowercase( ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = Accelerator(split_batches=_a , dispatch_batches=_a )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("""**Testing gather_for_metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(_a , _a )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test torch metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
UpperCamelCase = Accelerator(split_batches=_a , dispatch_batches=_a )
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(_a , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test last batch is not dropped when perfectly divisible**""" )
UpperCamelCase = Accelerator()
test_torch_metrics(_a , 512 )
accelerator.state._reset_state()
def lowercase( UpperCamelCase_ ) -> List[str]:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
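# --- Added usage note (illustrative, not part of the original script) ---
# The checks above only exercise multi-process behaviour when launched through
# the `accelerate` CLI so that several workers share the dataloaders. A minimal
# sketch, assuming `accelerate` is configured on the machine and the file is
# saved as test_metrics.py (hypothetical name):
#
#   accelerate launch --num_processes 2 test_metrics.py
#
# With a single process (`python test_metrics.py`), gather_for_metrics is
# effectively a passthrough and the baseline/distributed results trivially match.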
| 343 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # the dataclass is frozen, so write through __dict__
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
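# --- Added usage sketch (illustrative, not part of the original module) ---
# `align_with_features` swaps the generic Audio() placeholder for the dataset's
# actual audio feature (e.g. a specific sampling rate). A minimal sketch,
# assuming a dataset whose features contain "audio" and "transcription" columns:
#
# template = AutomaticSpeechRecognition()
# template = template.align_with_features(
#     Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
# )
# assert template.column_mapping == {"audio": "audio", "transcription": "transcription"}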
| 345 | 0 |
"""simple docstring"""
# using dfs for finding eulerian path traversal
def dfs(graph, u, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(graph, v, visited_edge, path)
    return path


# for checking whether the graph has an Euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(graph, start_node, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
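# --- Added worked example (illustrative, not part of the original module) ---
# For g1 above, nodes 1 and 5 have odd degree, so check_circuit_or_path returns
# (2, 5) and dfs starts from odd-degree node 5, yielding the Euler path
# [5, 4, 1, 2, 3, 1] (the exact path depends on the adjacency-list order).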
| 367 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
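# --- Added usage sketch (illustrative, not part of the original module) ---
# dep_version_check can be called lazily for optional backends; a sketch:
#
#   dep_version_check("tqdm")
#
# which raises if the installed tqdm does not satisfy the version pin
# recorded in dependency_versions_table.py.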
| 138 | 0 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
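# --- Added usage note (illustrative, not part of the original module) ---
# Instantiating the deprecated class still works but emits the FutureWarning:
#
#   feature_extractor = GLPNFeatureExtractor()  # warns; behaves like GLPNImageProcessor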
| 187 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
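# --- Added usage sketch (illustrative file names, not part of the original script) ---
# Typical invocation against the SQuAD 2.0 dev set, with no-answer probabilities:
#
#   python evaluate-v2.0.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json --out-file eval.json
#
# Without --na-prob-file, every question is treated as answered (na_prob = 0.0)
# and the best-threshold search is skipped.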
| 187 | 1 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_sde_ve_pipeline(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)
        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
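# --- Added usage note (illustrative, not part of the original test file) ---
# The fast test runs on CPU in a few seconds; the integration test downloads
# google/ncsnpp-church-256 and only runs when slow tests are enabled, e.g.:
#
#   RUN_SLOW=1 python -m pytest -k ScoreSdeVe tests/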
| 276 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
    from transformers.models.esm.modeling_esm import (
        ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        EsmEmbeddings,
        create_position_ids_from_input_ids,
    )


class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()

            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 276 | 1 |
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
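# --- Added usage sketch (illustrative checkpoint name, not part of the original) ---
# from transformers import AutoModelForSequenceClassification, AutoTokenizer
#
# model = AutoModelForSequenceClassification.from_pretrained("sgugger/finetuned-bert-mrpc")
# tokenizer = AutoTokenizer.from_pretrained("sgugger/finetuned-bert-mrpc")
# pipe = PairClassificationPipeline(model=model, tokenizer=tokenizer)
# print(pipe("I like pizza", second_text="Pizza tastes great"))
# # -> {"label": ..., "score": ..., "logits": [...]}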
| 90 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 252 | 0 |
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Return the relative distance (= step / max_step) after which the complex
    number made of this x-y-pair diverges. Members of the Mandelbrot set do
    not diverge, so their distance is 1.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black-and-white coloring: the Mandelbrot set is black, the rest white."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Color-coding that takes the relative distance into account."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x=-0.6, figure_center_y=-0.4,
    #                 figure_width=0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding=False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
| 247 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """Log commit info."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)


def init_gpu_params(params):
    """Handle single- and multi-GPU / multi-node setups."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )


def set_seed(args):
    """Set the random seed everywhere."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
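# --- Added usage sketch (illustrative, not part of the original module) ---
# init_gpu_params expects an argparse-style namespace; a single-GPU sketch
# (requires CUDA, since the function asserts GPU availability):
#
# from argparse import Namespace
# params = Namespace(n_gpu=1, local_rank=-1, seed=56)
# init_gpu_params(params)  # sets world_size=1, is_master=True, multi_gpu=False
# set_seed(params)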
| 247 | 1 |
def solution(limit: int = 1000000) -> int:
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so adjust all of its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
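# --- Added note (not part of the original solution) ---
# phi is filled with a sieve: phi[i] starts at i - 1, and whenever i is found
# prime (phi[i] untouched), every multiple j loses phi[j] // i. The answer
# sum(phi[2..limit]) counts the reduced proper fractions n/d with d <= limit,
# e.g. solution(8) == 21.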
| 325 |
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple[str, float]:
    """
    Determine the missing carrier concentration from the other two using the
    mass action law: electron_conc * hole_conc = intrinsic_conc ** 2.
    Exactly one of the three arguments must be 0 (the unknown one).
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
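# --- Added worked example (not part of the original module) ---
# With the electron and hole concentrations known, the mass action law
# n * p = n_i**2 yields the missing quantity, e.g.:
#
# carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
# # -> ('intrinsic_conc', 50.0)   since n_i = sqrt(25 * 100)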
| 188 | 0 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
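# --- Added usage sketch (illustrative paths, not part of the original script) ---
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#       --xlm_checkpoint_path ./mlm_en_2048.pth \
#       --pytorch_dump_folder_path ./xlm-converted
#
# This writes the weights (WEIGHTS_NAME), config.json and the vocab file into
# the target folder.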
| 360 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)


class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        """Just EOS"""
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
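# --- Added usage sketch (illustrative, not part of the original module) ---
# tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
# ids = tokenizer("The quick brown fox.").input_ids  # ends with eos_token_id (1)
# text = tokenizer.decode(ids, skip_special_tokens=True)
#
# Note the `offset` (103 by default): sentencepiece ids are shifted so that ids
# 0..102 stay reserved for pad/eos/<mask_2>/<mask_1> and the <unk_2>..<unk_102>
# pretraining tokens.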
| 328 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the timm model's weights to our BiT structure."""
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
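# --- Added usage sketch (illustrative script name, not part of the original) ---
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50 --push_to_hub
#
# Requires `timm` with the BiT weights available, since the converted model is
# checked against the original timm logits before anything is saved.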
| 36 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__A : Optional[Any] = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train", tokenizer.__class__.__name__, str(max_seq_length), task
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))
                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith("ex") else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
) -> List[InputFeatures]:
    """Loads a data file into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
| 138 | 0 |
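# Standalone sketch of the lock-guarded feature cache used by the dataset
# classes above: the first process builds and saves the features while every
# other process blocks on the lock and then loads the cached file. The path
# and the build step here are hypothetical.
import os
import torch
from filelock import FileLock

cached_features_file = "/tmp/cached_features.bin"
with FileLock(cached_features_file + ".lock"):
    if os.path.exists(cached_features_file):
        features = torch.load(cached_features_file)
    else:
        features = [[1, 2, 3], [4, 5, 6]]  # stand-in for the expensive conversion
        torch.save(features, cached_features_file)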
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
| 364 |
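# Hypothetical quick check of the idea above, independent of the module: a few
# normalized matrix-vector products drive any start vector toward the dominant
# eigenvector, and the Rayleigh quotient then recovers the dominant eigenvalue.
import numpy as np

a = np.array([[2.0, 1.0], [1.0, 3.0]])
v = np.ones(2)
for _ in range(100):
    w = a @ v
    v = w / np.linalg.norm(w)
dominant = v @ a @ v  # Rayleigh quotient of the normalized converged vector
assert abs(dominant - np.linalg.eigh(a)[0][-1]) < 1e-9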
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk. Use in place of torch.save() in a distributed setup."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Add each keyword argument to os.environ, then clean them up on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Get a pretty name for a function or object."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge two dictionaries, with `source` taking priority."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination


def is_port_in_use(port: int = None) -> bool:
    """Check whether a port is in use on localhost, e.g. by another torch.distributed run."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
| 158 | 0 |
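# A small usage sketch of the patch_environment idiom defined above: exported
# variables exist only inside the `with` block. This hypothetical variant adds
# try/finally so the cleanup also runs if the body raises.
import os
from contextlib import contextmanager

@contextmanager
def temp_environment(**kwargs):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    try:
        yield
    finally:
        for key in kwargs:
            os.environ.pop(key.upper(), None)

with temp_environment(master_port=29501):
    assert os.environ["MASTER_PORT"] == "29501"
assert "MASTER_PORT" not in os.environ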
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    """Read the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 276 |
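# Standalone illustration of the re.MULTILINE substitution technique used by
# the release helpers above: anchor on the whole assignment line and swap in
# the new version string. Package name and versions are hypothetical.
import re

source = 'name = "pkg"\n__version__ = "0.14.0.dev0"\n'
pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
bumped = pattern.sub('__version__ = "0.14.0"', source)
assert '__version__ = "0.14.0"' in bumped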
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 276 | 1 |
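# Rough standalone sketch of what the _LazyModule indirection above buys: with
# a module-level __getattr__ (PEP 562), the heavy import only runs when the
# attribute is first touched. The submodule and symbol names are hypothetical.
import importlib

_import_structure = {"tokenization_example": ["ExampleTokenizer"]}

def __getattr__(name):
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")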
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    """Return True if `string` can be segmented into a sequence of `words`."""

    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 360 |
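# A compact standalone variant of the same idea: memoized DP over suffixes,
# scanning word prefixes directly instead of walking a trie. This hypothetical
# helper trades the trie's speed for brevity.
import functools

def word_break_simple(string: str, words: list[str]) -> bool:
    @functools.cache
    def breakable(index: int) -> bool:
        if index == len(string):
            return True
        return any(
            string.startswith(word, index) and breakable(index + len(word))
            for word in words
        )
    return breakable(0)

assert word_break_simple("applepenapple", ["apple", "pen"])
assert not word_break_simple("catsandog", ["cats", "dog", "sand", "and", "cat"])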
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    r"""Construct a PEGASUS tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        """Just EOS"""
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get list where entries are [1] if a token is [eos] or [pad] else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by adding eos to the end."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 328 | 0 |
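# Self-contained sketch of the id-offset scheme the tokenizer above uses: ids
# 0..offset-1 are reserved for special tokens, and every SentencePiece id is
# shifted up by `offset` on the way in and back down on the way out. The tiny
# vocabulary below is a hypothetical stand-in for a real sp model.
OFFSET = 103
sp_vocab = ["▁hello", "▁world"]

def sp_id_to_model_id(sp_id: int) -> int:
    return sp_id + OFFSET

def model_id_to_token(model_id: int) -> str:
    specials = {0: "<pad>", 1: "</s>", 2: "<mask_2>", 3: "<mask_1>"}
    if model_id in specials:
        return specials[model_id]
    return sp_vocab[model_id - OFFSET]

assert sp_id_to_model_id(0) == 103
assert model_id_to_token(104) == "▁world"
assert model_id_to_token(1) == "</s>"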
"""simple docstring"""
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Returns base^exponent % modulo_value by recursive squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Returns the last `digits` digits of the hyperexponentiation (tetration)
    of `base` by `height`."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(f'{solution() = }')
| 247 |
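# An iterative variant of the same square-and-multiply idea, checked against
# Python's built-in three-argument pow. The helper name is hypothetical.
def modexp(base: int, exponent: int, modulus: int) -> int:
    result = 1
    base %= modulus
    while exponent:
        if exponent & 1:          # multiply in the current power when the bit is set
            result = result * base % modulus
        base = base * base % modulus  # square for the next bit
        exponent >>= 1
    return result

assert modexp(1777, 1855, 10**8) == pow(1777, 1855, 10**8)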
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase_ :
lowercase__ = field(
default=A_, metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(A_ )} )
lowercase__ = field(
default=A_, metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
lowercase__ = field(
default=1_28, metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
}, )
lowercase__ = field(
default=1_28, metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''}, )
lowercase__ = field(
default=64, metadata={
'''help''': (
'''The maximum number of tokens for the question. Questions longer than this will '''
'''be truncated to this length.'''
)
}, )
lowercase__ = field(
default=30, metadata={
'''help''': (
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
)
}, )
lowercase__ = field(
default=A_, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
lowercase__ = field(
default=A_, metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
lowercase__ = field(
default=0.0, metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
lowercase__ = field(
default=20, metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
lowercase__ = field(
default=0, metadata={
'''help''': (
'''language id of input for language-specific xlm models (see'''
''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
)
}, )
lowercase__ = field(default=1, metadata={'''help''': '''multiple threads for converting example to features'''} )
class UpperCAmelCase_ ( A_ ):
lowercase__ = '''train'''
lowercase__ = '''dev'''
class UpperCAmelCase_ ( A_ ):
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
def __init__( self : List[Any] , snake_case_ : SquadDataTrainingArguments , snake_case_ : PreTrainedTokenizer , snake_case_ : Optional[int] = None , snake_case_ : Union[str, Split] = Split.train , snake_case_ : Optional[bool] = False , snake_case_ : Optional[str] = None , snake_case_ : Optional[str] = "pt" , ) -> Union[str, Any]:
'''simple docstring'''
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(snake_case_ , snake_case_ ):
try:
A__ = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
A__ = mode
# Load data features from cache or dataset file
A__ = "v2" if args.version_2_with_negative else "v1"
A__ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}""" , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A__ = cached_features_file + ".lock"
with FileLock(snake_case_ ):
if os.path.exists(snake_case_ ) and not args.overwrite_cache:
A__ = time.time()
A__ = torch.load(snake_case_ )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
A__ = self.old_features["features"]
A__ = self.old_features.get("dataset" , snake_case_ )
A__ = self.old_features.get("examples" , snake_case_ )
logger.info(
F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F"""Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"""
" future run" )
else:
if mode == Split.dev:
A__ = self.processor.get_dev_examples(args.data_dir )
else:
A__ = self.processor.get_train_examples(args.data_dir )
A__, A__ = squad_convert_examples_to_features(
examples=self.examples , tokenizer=snake_case_ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=snake_case_ , )
A__ = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , snake_case_ , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return len(self.features )
def __getitem__( self : Union[str, Any] , snake_case_ : Any ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
A__ = self.features[i]
A__ = torch.tensor(feature.input_ids , dtype=torch.long )
A__ = torch.tensor(feature.attention_mask , dtype=torch.long )
A__ = torch.tensor(feature.token_type_ids , dtype=torch.long )
A__ = torch.tensor(feature.cls_index , dtype=torch.long )
A__ = torch.tensor(feature.p_mask , dtype=torch.float )
A__ = torch.tensor(feature.is_impossible , dtype=torch.float )
A__ = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
            inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
if self.mode == Split.train:
A__ = torch.tensor(feature.start_position , dtype=torch.long )
A__ = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
| 247 | 1 |
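# Minimal sketch of the feature-to-tensor conversion pattern in __getitem__
# above, using a hypothetical feature dict in place of a SquadFeatures object.
import torch

feature = {"input_ids": [101, 2054, 102], "attention_mask": [1, 1, 1]}
inputs = {k: torch.tensor(v, dtype=torch.long) for k, v in feature.items()}
assert inputs["input_ids"].shape == (3,)
assert inputs["attention_mask"].dtype == torch.long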
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []

    completed = 0
    total_time = 0

    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.

    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Calculate the turnaround time of each process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print("[TEST CASE 01]")
lowerCamelCase : Union[str, Any] = 4
lowerCamelCase : int = [2, 5, 3, 7]
lowerCamelCase : List[Any] = [0, 0, 0, 0]
lowerCamelCase : Dict = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
lowerCamelCase : Any = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
f'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
f'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(f'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(f'''Average turnaround time = {mean(turn_around_time):.5f}''')
| 114 |
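# Standalone cross-check for the scheduler above: when every job arrives at
# t=0, shortest-job-first waiting times are just prefix sums of the sorted
# burst times. The burst values match TEST CASE 01 in the module.
def sjf_waiting_times(bursts: list[int]) -> list[int]:
    order = sorted(range(len(bursts)), key=bursts.__getitem__)
    waits, elapsed = [0] * len(bursts), 0
    for job in order:
        waits[job] = elapsed
        elapsed += bursts[job]
    return waits

assert sjf_waiting_times([2, 5, 3, 7]) == [0, 5, 2, 10]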
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")

DUMMY_CONSTANT = "\n{0} = None\n"
DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n"
DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"
class A__ ( unittest.TestCase ):
def A ( self : Optional[int] ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' )
self.assertIsNone(_a )
_SCREAMING_SNAKE_CASE =find_backend(' if not is_tokenizers_available():' )
self.assertEqual(_a , 'tokenizers' )
_SCREAMING_SNAKE_CASE =find_backend(' if not is_tensorflow_text_available():' )
self.assertEqual(_a , 'tensorflow_text' )
_SCREAMING_SNAKE_CASE =find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
self.assertEqual(_a , 'sentencepiece_and_tokenizers' )
_SCREAMING_SNAKE_CASE =find_backend(
' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
self.assertEqual(_a , 'sentencepiece_and_tensorflow_text' )
_SCREAMING_SNAKE_CASE =find_backend(
' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
self.assertEqual(_a , 'sentencepiece_and_tokenizers_and_vision' )
def A ( self : Optional[int] ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('torch' , _a )
self.assertIn('tensorflow_text' , _a )
self.assertIn('sentencepiece_and_tokenizers' , _a )
# Likewise, we can't assert on the exact content of a key
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertModel' , objects['tf'] )
self.assertIn('FlaxBertModel' , objects['flax'] )
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] )
self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] )
def A ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =create_dummy_object('CONSTANT' , '\'torch\'' )
self.assertEqual(_a , '\nCONSTANT = None\n' )
_SCREAMING_SNAKE_CASE =create_dummy_object('function' , '\'torch\'' )
self.assertEqual(
_a , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
_SCREAMING_SNAKE_CASE ='\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n'
_SCREAMING_SNAKE_CASE =create_dummy_object('FakeClass' , '\'torch\'' )
self.assertEqual(_a , _a )
def A ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE ='# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n'
_SCREAMING_SNAKE_CASE =create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] , _a )
| 114 | 1 |
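# Approximate standalone sketch of the dummy-object pattern these tests check:
# a metaclass stand-in whose classes fail loudly at use time when a backend
# (hard-coded here as "torch") is missing. This simplifies the real helper.
class DummyObject(type):
    def __getattr__(cls, key):
        raise ImportError(f"{cls.__name__} requires the torch backend")

class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        raise ImportError(f"{type(self).__name__} requires the torch backend")

try:
    FakeClass()
except ImportError as err:
    assert "torch" in str(err)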
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowercase = "\\n\n"
lowercase = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
lowercase = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
'''simple docstring'''
def _UpperCamelCase ( self ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , )
def _UpperCamelCase ( self , a , a , a = 16 , a = True , a=None ) -> Any:
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
snake_case_ = 'cuda'
else:
snake_case_ = 'cuda' if torch.cuda.is_available() else 'cpu'
snake_case_ = AutoModelForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
snake_case_ = model.to(SCREAMING_SNAKE_CASE_ )
snake_case_ = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
snake_case_ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(SCREAMING_SNAKE_CASE_ ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
snake_case_ = model.config.max_length - 1
else:
snake_case_ = model.config.max_length
snake_case_ = tokenizer(
SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='pt' , return_attention_mask=SCREAMING_SNAKE_CASE_ , ).to(SCREAMING_SNAKE_CASE_ )
snake_case_ = encodings['input_ids']
snake_case_ = encodings['attention_mask']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
snake_case_ = []
snake_case_ = CrossEntropyLoss(reduction='none' )
for start_index in logging.tqdm(range(0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) ):
snake_case_ = min(start_index + batch_size , len(SCREAMING_SNAKE_CASE_ ) )
snake_case_ = encoded_texts[start_index:end_index]
snake_case_ = attn_masks[start_index:end_index]
if add_start_token:
snake_case_ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(SCREAMING_SNAKE_CASE_ )
snake_case_ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
snake_case_ = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(SCREAMING_SNAKE_CASE_ ), attn_mask] , dim=1 )
snake_case_ = encoded_batch
with torch.no_grad():
snake_case_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).logits
snake_case_ = out_logits[..., :-1, :].contiguous()
snake_case_ = labels[..., 1:].contiguous()
snake_case_ = attn_mask[..., 1:].contiguous()
snake_case_ = torch.exp2(
(loss_fct(shift_logits.transpose(1 , 2 ) , SCREAMING_SNAKE_CASE_ ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(SCREAMING_SNAKE_CASE_ )}
| 178 |
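# Worked numeric sketch of the formula the metric above computes per sequence:
# perplexity is the exponentiated mean per-token negative log-likelihood. The
# metric above exponentiates base 2; natural exp is shown for contrast. The
# per-token losses are hypothetical.
import math

token_nlls = [2.1, 1.7, 3.0, 0.9]
mean_nll = sum(token_nlls) / len(token_nlls)  # 1.925
print(f"exp: {math.exp(mean_nll):.2f}, exp2: {2 ** mean_nll:.2f}")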
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class PoolFormerImageProcessor(BaseImageProcessor):
    """Constructs a PoolFormer image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: int = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")

        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: int = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 328 | 0 |
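# Worked sketch of the crop_pct resize rule implemented above: to end with a
# 224-pixel center crop that covers 90% of the resized image, first resize the
# shortest edge to int(224 / 0.9) = 248.
crop_size, crop_pct = 224, 0.9
resize_shortest_edge = int(crop_size / crop_pct)
assert resize_shortest_edge == 248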
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
def lowerCAmelCase_ ( self ) -> Any:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCAmelCase_ ( self , A=False , A=False ) -> Union[str, Any]:
'''simple docstring'''
def _flatten(A ):
return list(itertools.chain(*A ) )
if equal_length:
a = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
a = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
a = [np.asarray(A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class a__ ( UpperCamelCase__ , unittest.TestCase ):
    a : Any = Speech2TextFeatureExtractor if is_speech_available() else None
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
        a = Speech2TextFeatureExtractionTester(self )
def lowerCAmelCase_ ( self , A ) -> Any:
'''simple docstring'''
self.assertTrue(np.all(np.mean(A , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(A , axis=0 ) - 1 ) < 1e-3 ) )
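    # Editor's note (assumption): the helper above asserts per-dimension CMVN,
    # i.e. x_norm = (x - mean(x)) / sqrt(var(x)) along the time axis, so column
    # means are ~0 and column variances ~1 within an absolute tolerance of 1e-3.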
def lowerCAmelCase_ ( self ) -> Any:
'''simple docstring'''
a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a = [np.asarray(A ) for speech_input in speech_inputs]
# Test feature size
a = feature_extractor(A , padding=A , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
a = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
a = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
# Test batched
a = feature_extractor(A , return_tensors="np" ).input_features
a = feature_extractor(A , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
a = [floats_list((1, x) )[0] for x in (800, 800, 800)]
a = np.asarray(A )
a = feature_extractor(A , return_tensors="np" ).input_features
a = feature_extractor(A , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a = ["longest", "max_length", "do_not_pad"]
a = [None, 16, None]
for max_length, padding in zip(A , A ):
a = feature_extractor(
A , padding=A , max_length=A , return_attention_mask=A )
a = inputs.input_features
a = inputs.attention_mask
a = [np.sum(A ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a = ["longest", "max_length", "do_not_pad"]
a = [None, 16, None]
for max_length, padding in zip(A , A ):
a = feature_extractor(
A , max_length=A , padding=A , return_tensors="np" , return_attention_mask=A )
a = inputs.input_features
a = inputs.attention_mask
a = [np.sum(A ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
        self.assertTrue(input_features[1][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a = feature_extractor(
A , padding="max_length" , max_length=4 , truncation=A , return_tensors="np" , return_attention_mask=A , )
a = inputs.input_features
a = inputs.attention_mask
a = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a = feature_extractor(
A , padding="longest" , max_length=4 , truncation=A , return_tensors="np" , return_attention_mask=A , )
a = inputs.input_features
a = inputs.attention_mask
a = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
        # make sure that if max_length < longest -> then truncate to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a = feature_extractor(
A , padding="longest" , max_length=16 , truncation=A , return_tensors="np" , return_attention_mask=A , )
a = inputs.input_features
a = inputs.attention_mask
a = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
        # make sure that if max_length > longest -> then pad to longest
self.assertEqual(input_features.shape , (3, 6, 24) )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
import torch
a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        a = np.random.rand(100 , 32 ).astype(np.float64 )
a = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
a = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
a = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def lowerCAmelCase_ ( self , A ) -> Dict:
'''simple docstring'''
from datasets import load_dataset
a = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
a = ds.sort("id" ).select(range(A ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
        # fmt: off
        a = np.array([
-1.5_7_4_5, -1.7_7_1_3, -1.7_0_2_0, -1.6_0_6_9, -1.2_2_5_0, -1.1_1_0_5, -0.9_0_7_2, -0.8_2_4_1,
-1.2_3_1_0, -0.8_0_9_8, -0.3_3_2_0, -0.4_1_0_1, -0.7_9_8_5, -0.4_9_9_6, -0.8_2_1_3, -0.9_1_2_8,
-1.0_4_2_0, -1.1_2_8_6, -1.0_4_4_0, -0.7_9_9_9, -0.8_4_0_5, -1.2_2_7_5, -1.5_4_4_3, -1.4_6_2_5,
] )
# fmt: on
a = self._load_datasamples(1 )
a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a = feature_extractor(A , return_tensors="pt" ).input_features
        self.assertEqual(input_features.shape , (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , A , atol=1e-4 ) )
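# --- Editor's hedged sketch (not part of the original file) ---
# Pure-numpy version of the zero-mean/unit-variance property checked above;
# the feature matrix is a made-up stand-in for (frames, mel bins) features.
import numpy as _np_cmvn

_feats = _np_cmvn.arange(12.0).reshape(4, 3)
_norm = (_feats - _feats.mean(axis=0)) / _np_cmvn.sqrt(_feats.var(axis=0))
assert _np_cmvn.allclose(_norm.mean(axis=0), 0.0, atol=1e-6)
assert _np_cmvn.allclose(_norm.var(axis=0), 1.0, atol=1e-6)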
| 366 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
lowercase__ : Optional[int] = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F'{bindir}/../../examples/pytorch/translation'):
from run_translation import main # noqa
set_seed(42)
lowercase__ : List[str] = "sshleifer/student_marian_en_ro_6_1"
lowercase__ : List[Any] = "sshleifer/tiny-mbart"
@require_torch
class a__ ( UpperCamelCase__ ):
def lowerCAmelCase_ ( self , A=False , A=None , A=True , A=True , A=True , A=True , ) -> List[Any]:
'''simple docstring'''
a = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=A , num_train_epochs=1 , distributed=A , extra_args_str=A , predict_with_generate=A , do_train=A , do_eval=A , do_predict=A , )
a = TrainerState.load_from_json(os.path.join(A , "trainer_state.json" ) ).log_history
if not do_eval:
return
a = [log for log in logs if "eval_loss" in log.keys()]
a = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
a = eval_metrics[-1]
assert isinstance(last_step_stats["eval_bleu"] , A )
assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
self.run_seqaseq_quick()
@require_torch_multi_gpu
def lowerCAmelCase_ ( self ) -> Dict:
'''simple docstring'''
self.run_seqaseq_quick(distributed=A )
@require_torch_multi_gpu
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
self.run_seqaseq_quick(distributed=A )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
self.run_seqaseq_quick(distributed=A , extra_args_str="--sharded_ddp simple" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase_ ( self ) -> Tuple:
'''simple docstring'''
self.run_seqaseq_quick(distributed=A , extra_args_str="--sharded_ddp simple --fp16" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
self.run_seqaseq_quick(distributed=A , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=A )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase_ ( self ) -> Any:
'''simple docstring'''
self.run_seqaseq_quick(
distributed=A , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , predict_with_generate=A )
@require_apex
@require_torch_gpu
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
self.run_seqaseq_quick(distributed=A , extra_args_str="--fp16 --fp16_backend=apex" )
        # test a 2nd time - was getting eval_loss: nan
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=A , extra_args_str="--fp16 --fp16_backend=apex" )
@parameterized.expand(["base", "low", "high", "mixed"] )
@require_torch_multi_gpu
def lowerCAmelCase_ ( self , A ) -> Dict:
'''simple docstring'''
a = {
# test with the default log_level - should be info and thus log info once
"base": {"extra_args_str": "", "n_matches": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
}
a = experiments[experiment_id]
a = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
a = "Running training"
with CaptureStderr() as cl:
self.run_seqaseq_quick(**A , extra_args_str=data["extra_args_str"] )
a = len(re.findall(A , cl.err ) )
self.assertEqual(A , data["n_matches"] )
@slow
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
a = self.run_trainer(
eval_steps=2 , max_len=128 , model_name=A , learning_rate=3e-4 , num_train_epochs=10 , distributed=A , )
# Check metrics
a = TrainerState.load_from_json(os.path.join(A , "trainer_state.json" ) ).log_history
a = [log for log in logs if "eval_loss" in log.keys()]
a = eval_metrics[0]
a = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["eval_bleu"] , A )
# test if do_predict saves generations and metrics
a = os.listdir(A )
a = {os.path.basename(A ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
from transformers.training_args import OptimizerNames
def train_and_return_metrics(A ) -> Tuple[int, float]:
a = "--skip_memory_metrics 0"
a = self.run_trainer(
max_len=128 , model_name=A , learning_rate=3e-4 , num_train_epochs=1 , optim=A , distributed=A , extra_args_str=A , do_eval=A , do_predict=A , n_gpus_to_use=1 , )
# Check metrics
a = TrainerState.load_from_json(Path(A , "trainer_state.json" ) ).log_history
a = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 )
a = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 )
a = logs[0]["train_loss"]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
a , a , a = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
a , a , a = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
a = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
a = gpu_peak_mem_orig + gpu_alloc_mem_orig
a = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
a = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
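        # Editor's worked check of the figures above (assumed values):
        #   25e6 params * (8 - 2) bytes = 150e6 bytes ~= 143 MiB of optimizer
        #   state saved, hence the conservative 120 MB lower bound used below.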
a = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
A , A , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
A , A , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
A , A , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def lowerCAmelCase_ ( self , A , A , A , A = 3e-3 , A = "adafactor" , A = False , A = None , A = 0 , A = True , A = True , A = True , A = True , A = None , ) -> Tuple:
'''simple docstring'''
a = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
a = self.get_auto_remove_tmp_dir()
a = F'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(A )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(A )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
a = F'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(A )}
'''.split()
a = "\n --do_predict\n ".split()
a = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
a = get_gpu_count()
a = get_torch_dist_unique_port()
a = F'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
a = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(A , env=self.get_env() )
else:
a = ["run_translation.py"] + args
with patch.object(A , "argv" , A ):
main()
return output_dir
| 180 | 0 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self : Dict ) -> Any:
"""simple docstring"""
snake_case_ = "ylacombe/bark-small"
snake_case_ = tempfile.mkdtemp()
snake_case_ = "en_speaker_1"
snake_case_ = "This is a test string"
snake_case_ = "speaker_embeddings_path.json"
snake_case_ = "speaker_embeddings"
def lowerCAmelCase__ ( self : Union[str, Any] , **_lowerCAmelCase : List[Any] ) -> str:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **_lowerCAmelCase )
def lowerCAmelCase__ ( self : Dict ) -> int:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
snake_case_ = self.get_tokenizer()
snake_case_ = BarkProcessor(tokenizer=_lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
snake_case_ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowerCAmelCase__ ( self : str ) -> Any:
"""simple docstring"""
snake_case_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
snake_case_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
snake_case_ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowerCAmelCase__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
snake_case_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
snake_case_ = 3_5
snake_case_ = 2
snake_case_ = 8
snake_case_ = {
"semantic_prompt": np.ones(_lowerCAmelCase ),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ),
"fine_prompt": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
snake_case_ = processor(text=self.input_string , voice_preset=_lowerCAmelCase )
snake_case_ = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_lowerCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
snake_case_ = os.path.join(self.tmpdirname , "file.npz" )
np.savez(_lowerCAmelCase , **_lowerCAmelCase )
snake_case_ = processor(text=self.input_string , voice_preset=_lowerCAmelCase )
snake_case_ = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_lowerCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
snake_case_ = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowerCAmelCase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
snake_case_ = self.get_tokenizer()
snake_case_ = BarkProcessor(tokenizer=_lowerCAmelCase )
snake_case_ = processor(text=self.input_string )
snake_case_ = tokenizer(
self.input_string , padding="max_length" , max_length=2_5_6 , add_special_tokens=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
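# --- Editor's hedged sketch (not part of the original file) ---
# Shape bookkeeping for the voice preset used above: the semantic prompt is
# 1-D, the coarse prompt covers 2 codebooks and the fine prompt all 8
# (seq_len=35 is the assumed value from the test).
import numpy as _np_bark

_seq_len, _coarse, _total = 35, 2, 8
_preset = {
    "semantic_prompt": _np_bark.ones(_seq_len),
    "coarse_prompt": _np_bark.ones((_coarse, _seq_len)),
    "fine_prompt": _np_bark.ones((_total, _seq_len)),
}
assert _preset["fine_prompt"].shape == (8, 35)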
| 159 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def __a(SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
_lowerCAmelCase = []
for part_id in partition_order:
_lowerCAmelCase = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(SCREAMING_SNAKE_CASE_ ):
expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
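# Editor's example (assumed 5 rows per partition): with partition_order=[1, 0]
# the helper above yields ids "1_0" ... "1_4" followed by "0_0" ... "0_4",
# matching the f"{part_id}_{row_idx}" format.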
@require_not_windows
@require_dill_gt_0_3_2
def __a():
'''simple docstring'''
_lowerCAmelCase = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
_lowerCAmelCase = spark.range(100 ).repartition(1 )
_lowerCAmelCase = Spark(SCREAMING_SNAKE_CASE_ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def __a():
'''simple docstring'''
_lowerCAmelCase = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
_lowerCAmelCase = spark.range(10 ).repartition(2 )
_lowerCAmelCase = [1, 0]
_lowerCAmelCase = _generate_iterable_examples(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Reverse the partitions.
_lowerCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
_lowerCAmelCase , _lowerCAmelCase = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __a():
'''simple docstring'''
_lowerCAmelCase = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
_lowerCAmelCase = spark.range(10 ).repartition(1 )
_lowerCAmelCase = SparkExamplesIterable(SCREAMING_SNAKE_CASE_ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(SCREAMING_SNAKE_CASE_ ):
assert row_id == F'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def __a():
'''simple docstring'''
_lowerCAmelCase = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
_lowerCAmelCase = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
_lowerCAmelCase = lambda SCREAMING_SNAKE_CASE_ : x.reverse()
_lowerCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(SCREAMING_SNAKE_CASE_ , [2, 1, 0] )
_lowerCAmelCase = SparkExamplesIterable(SCREAMING_SNAKE_CASE_ ).shuffle_data_sources(SCREAMING_SNAKE_CASE_ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(SCREAMING_SNAKE_CASE_ ):
_lowerCAmelCase , _lowerCAmelCase = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __a():
'''simple docstring'''
_lowerCAmelCase = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
_lowerCAmelCase = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
_lowerCAmelCase = SparkExamplesIterable(SCREAMING_SNAKE_CASE_ ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
_lowerCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(SCREAMING_SNAKE_CASE_ , [0, 2] )
for i, (row_id, row_dict) in enumerate(SCREAMING_SNAKE_CASE_ ):
_lowerCAmelCase , _lowerCAmelCase = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
_lowerCAmelCase = SparkExamplesIterable(SCREAMING_SNAKE_CASE_ ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
_lowerCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(SCREAMING_SNAKE_CASE_ , [1, 3] )
for i, (row_id, row_dict) in enumerate(SCREAMING_SNAKE_CASE_ ):
_lowerCAmelCase , _lowerCAmelCase = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __a():
'''simple docstring'''
_lowerCAmelCase = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
_lowerCAmelCase = spark.range(100 ).repartition(1 )
_lowerCAmelCase = Spark(SCREAMING_SNAKE_CASE_ )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 158 | 0 |
"""simple docstring"""
import math
import sys
def lowerCAmelCase (__UpperCamelCase : int ):
"""simple docstring"""
if number != int(__UpperCamelCase ):
raise ValueError('''the value of input must be a natural number''' )
if number < 0:
raise ValueError('''the value of input must not be a negative number''' )
if number == 0:
return 1
__UpperCamelCase =[-1] * (number + 1)
__UpperCamelCase =0
for i in range(1 , number + 1 ):
__UpperCamelCase =sys.maxsize
__UpperCamelCase =int(math.sqrt(__UpperCamelCase ) )
for j in range(1 , root + 1 ):
__UpperCamelCase =1 + answers[i - (j**2)]
__UpperCamelCase =min(__UpperCamelCase , __UpperCamelCase )
__UpperCamelCase =answer
return answers[number]
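# Editor's worked examples: 12 = 4 + 4 + 4 gives 3, 13 = 9 + 4 gives 2, and any
# perfect square such as 16 gives 1 (by Lagrange's theorem the answer is <= 4).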
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 | """simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
class _lowercase ( __a , unittest.TestCase ):
"""simple docstring"""
lowercase__ = BartphoTokenizer
lowercase__ = False
lowercase__ = True
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
'''simple docstring'''
super().setUp()
__UpperCamelCase =['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']
__UpperCamelCase =dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
__UpperCamelCase ={'''unk_token''': '''<unk>'''}
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] )
with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
for token in vocab_tokens:
fp.write(f"""{token} {vocab_tokens[token]}\n""" )
__UpperCamelCase =BartphoTokenizer(UpperCamelCase__ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self : List[str] , **UpperCamelCase__ : List[str] ) -> Tuple:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def UpperCAmelCase_ ( self : Tuple , UpperCamelCase__ : Any ) -> Any:
'''simple docstring'''
__UpperCamelCase ='''This is a là test'''
__UpperCamelCase ='''This is a<unk><unk> test'''
return input_text, output_text
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
__UpperCamelCase =BartphoTokenizer(UpperCamelCase__ , self.monolingual_vocab_file , **self.special_tokens_map )
__UpperCamelCase ='''This is a là test'''
__UpperCamelCase ='''▁This ▁is ▁a ▁l à ▁t est'''.split()
__UpperCamelCase =tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =tokens + [tokenizer.unk_token]
__UpperCamelCase =[4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
| 85 | 1 |
"""simple docstring"""
import requests
lowerCAmelCase__ : Optional[int] = 'YOUR API KEY'
def a_ ( lowerCamelCase , lowerCamelCase = giphy_api_key ):
UpperCAmelCase__ = '+'.join(query.split() )
UpperCAmelCase__ = f'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
UpperCAmelCase__ = requests.get(lowerCamelCase ).json()['data']
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
| 98 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase__ : str = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_snake_case = ['pixel_values']
def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 0.9 , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 / 255 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> None:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = size if size is not None else {'''shortest_edge''': 224}
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' )
__UpperCamelCase = do_resize
__UpperCamelCase = size
__UpperCamelCase = crop_pct
__UpperCamelCase = resample
__UpperCamelCase = do_center_crop
__UpperCamelCase = crop_size
__UpperCamelCase = do_rescale
__UpperCamelCase = rescale_factor
__UpperCamelCase = do_normalize
__UpperCamelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__UpperCamelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray:
'''simple docstring'''
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(F"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
if crop_pct is not None:
if "shortest_edge" in size:
__UpperCamelCase = int(size['''shortest_edge'''] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
__UpperCamelCase = int(size['''height'''] / crop_pct )
else:
__UpperCamelCase = (int(size['''height'''] / crop_pct ), int(size['''width'''] / crop_pct ))
else:
raise ValueError('''Invalid size for resize: {}'''.format(SCREAMING_SNAKE_CASE_ ) )
__UpperCamelCase = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
else:
if "shortest_edge" in size:
__UpperCamelCase = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size['''shortest_edge'''] , default_to_square=SCREAMING_SNAKE_CASE_ )
elif "height" in size and "width" in size:
__UpperCamelCase = (size['''height'''], size['''width'''])
else:
raise ValueError('''Invalid size for resize: {}'''.format(SCREAMING_SNAKE_CASE_ ) )
return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray:
'''simple docstring'''
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(F"size must contain 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(SCREAMING_SNAKE_CASE_ , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> str:
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray:
'''simple docstring'''
return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , )-> PIL.Image.Image:
'''simple docstring'''
__UpperCamelCase = do_resize if do_resize is not None else self.do_resize
__UpperCamelCase = crop_pct if crop_pct is not None else self.crop_pct
__UpperCamelCase = resample if resample is not None else self.resample
__UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
__UpperCamelCase = image_mean if image_mean is not None else self.image_mean
__UpperCamelCase = image_std if image_std is not None else self.image_std
__UpperCamelCase = size if size is not None else self.size
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = crop_size if crop_size is not None else self.crop_size
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' )
__UpperCamelCase = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_pct is None:
raise ValueError('''Crop_pct must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__UpperCamelCase = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
__UpperCamelCase = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , crop_pct=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
__UpperCamelCase = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
__UpperCamelCase = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_normalize:
__UpperCamelCase = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images]
__UpperCamelCase = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
__UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
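# --- Editor's hedged sketch (not part of the original file) ---
# The crop_pct resize rule from `resize` above, worked numerically: enlarge the
# short edge to int(shortest_edge / crop_pct), then center-crop back down.
_shortest_edge, _crop_pct = 224, 0.9
_resize_to = int(_shortest_edge / _crop_pct)
assert _resize_to == 248  # 224 / 0.9 = 248.88... -> resize to 248, crop 224x224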
| 328 | 0 |
"""simple docstring"""
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _lowerCamelCase ( a_ ):
_lowerCamelCase :Dict = "facebook/bart-large-mnli"
_lowerCamelCase :Union[str, Any] = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
_lowerCamelCase :List[Any] = "text_classifier"
_lowerCamelCase :Tuple = AutoTokenizer
_lowerCamelCase :Optional[int] = AutoModelForSequenceClassification
_lowerCamelCase :Any = ["text", ["text"]]
_lowerCamelCase :List[Any] = ["text"]
def _lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
super().setup()
lowerCAmelCase__ : Dict = self.model.config
lowerCAmelCase__ : Dict = -1
        for idx, label in config.id2label.items():
if label.lower().startswith("""entail""" ):
lowerCAmelCase__ : Optional[Any] = int(UpperCamelCase )
if self.entailment_id == -1:
raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""" )
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : int , UpperCamelCase : List[str] ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : str = labels
return self.pre_processor(
[text] * len(UpperCamelCase ) , [f"""This example is {label}""" for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : Dict ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = outputs.logits
lowerCAmelCase__ : Union[str, Any] = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
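# Editor's hedged usage sketch (left as comments: it needs a model download,
# and the call shape is an assumption, not the documented API):
#
#   tool = TextClassificationTool()   # hypothetical instantiation name
#   tool.setup()                      # resolves the entailment label id
#   label = tool("This movie was great", ["positive", "negative"])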
| 370 |
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
_A = """base_with_context"""
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
lowerCAmelCase__ : Optional[int] = nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""] ) )
lowerCAmelCase__ : int = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=__UpperCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
lowerCAmelCase__ : str = weights[f"""layers_{lyr_num}"""]
lowerCAmelCase__ : Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
lowerCAmelCase__ : str = ly_weight["""attention"""]
lowerCAmelCase__ : str = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
lowerCAmelCase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
lowerCAmelCase__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
lowerCAmelCase__ : str = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
lowerCAmelCase__ : int = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
lowerCAmelCase__ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
lowerCAmelCase__ : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
lowerCAmelCase__ : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
lowerCAmelCase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
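# Editor's note (assumption): Flax/T5X stores dense kernels as
# (in_features, out_features) while torch.nn.Linear expects
# (out_features, in_features), hence the `.T` on every `kernel` ported here.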
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
lowerCAmelCase__ : Dict = nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T ) )
lowerCAmelCase__ : List[Any] = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=__UpperCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
lowerCAmelCase__ : int = weights[f"""layers_{lyr_num}"""]
lowerCAmelCase__ : Any = ly_weight["""attention"""]
lowerCAmelCase__ : int = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
lowerCAmelCase__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
lowerCAmelCase__ : str = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
lowerCAmelCase__ : Dict = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
lowerCAmelCase__ : Any = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
lowerCAmelCase__ : Any = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
lowerCAmelCase__ : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
lowerCAmelCase__ : Any = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
lowerCAmelCase__ : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
lowerCAmelCase__ : Optional[int] = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
lowerCAmelCase__ : List[str] = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense0"""]["""kernel"""].T ) )
lowerCAmelCase__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense1"""]["""kernel"""].T ) )
lowerCAmelCase__ : List[str] = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=__UpperCAmelCase )
lowerCAmelCase__ : Dict = nn.Parameter(
torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
lowerCAmelCase__ : List[Any] = weights[f"""layers_{lyr_num}"""]
lowerCAmelCase__ : Tuple = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) )
lowerCAmelCase__ : Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
lowerCAmelCase__ : Tuple = ly_weight["""self_attention"""]
lowerCAmelCase__ : str = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
lowerCAmelCase__ : Dict = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
lowerCAmelCase__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
lowerCAmelCase__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
lowerCAmelCase__ : List[Any] = ly_weight["""MultiHeadDotProductAttention_0"""]
lowerCAmelCase__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
lowerCAmelCase__ : Dict = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
lowerCAmelCase__ : Any = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
lowerCAmelCase__ : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
lowerCAmelCase__ : Optional[int] = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) )
lowerCAmelCase__ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
lowerCAmelCase__ : Dict = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
lowerCAmelCase__ : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
lowerCAmelCase__ : int = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
lowerCAmelCase__ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
lowerCAmelCase__ : str = nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) )
lowerCAmelCase__ : List[str] = nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) )
return model
def lowercase_ ( __UpperCAmelCase ) -> str:
    lowerCAmelCase__ : Optional[int] = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
lowerCAmelCase__ : Optional[int] = jnp.tree_util.tree_map(onp.array , __UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = [
"""from __gin__ import dynamic_registration""",
"""from music_spectrogram_diffusion.models.diffusion import diffusion_utils""",
"""diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0""",
"""diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()""",
]
lowerCAmelCase__ : Dict = os.path.join(args.checkpoint_path , """..""" , """config.gin""" )
lowerCAmelCase__ : Tuple = inference.parse_training_gin_file(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Any = inference.InferenceModel(args.checkpoint_path , __UpperCAmelCase )
lowerCAmelCase__ : List[Any] = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" , variance_type="""fixed_large""" )
lowerCAmelCase__ : List[Any] = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length["""inputs"""] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , )
lowerCAmelCase__ : List[str] = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["""targets_context"""] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , )
    lowerCAmelCase__ : Optional[int] = T5FilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["""targets_context"""] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
lowerCAmelCase__ : Optional[Any] = load_notes_encoder(ta_checkpoint["""target"""]["""token_encoder"""] , __UpperCAmelCase )
lowerCAmelCase__ : List[str] = load_continuous_encoder(ta_checkpoint["""target"""]["""continuous_encoder"""] , __UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = load_decoder(ta_checkpoint["""target"""]["""decoder"""] , __UpperCAmelCase )
lowerCAmelCase__ : Any = OnnxRuntimeModel.from_pretrained("""kashif/soundstream_mel_decoder""" )
lowerCAmelCase__ : Optional[Any] = SpectrogramDiffusionPipeline(
notes_encoder=__UpperCAmelCase , continuous_encoder=__UpperCAmelCase , decoder=__UpperCAmelCase , scheduler=__UpperCAmelCase , melgan=__UpperCAmelCase , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=f"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
_A = parser.parse_args()
main(args)
| 212 | 0 |
import random
class a :
"""simple docstring"""
@staticmethod
def UpperCAmelCase ( __lowercase : str ) -> tuple[list[int], list[int]]:
__UpperCAmelCase : Union[str, Any] = [ord(__lowercase ) for i in text]
__UpperCAmelCase : str = []
__UpperCAmelCase : Optional[Any] = []
for i in plain:
__UpperCAmelCase : str = random.randint(1 , 300 )
__UpperCAmelCase : List[str] = (i + k) * k
cipher.append(__lowercase )
key.append(__lowercase )
return cipher, key
@staticmethod
def UpperCAmelCase ( __lowercase : list[int] , __lowercase : list[int] ) -> str:
__UpperCAmelCase : List[str] = []
for i in range(len(__lowercase ) ):
__UpperCAmelCase : Tuple = int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(__lowercase ) )
return "".join(__lowercase )
if __name__ == "__main__":
a ,a : Any = Onepad().encrypt("Hello")
print(c, k)
print(Onepad().decrypt(c, k))
| 114 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class a ( lowercase__ ):
"""simple docstring"""
a : List[str] = 'MCTCTFeatureExtractor'
a : str = 'AutoTokenizer'
def __init__( self : Tuple , __lowercase : int , __lowercase : Dict ) -> Any:
super().__init__(__lowercase , __lowercase )
__UpperCAmelCase : Optional[Any] = self.feature_extractor
__UpperCAmelCase : Optional[int] = False
def __call__( self : int , *__lowercase : Tuple , **__lowercase : Optional[int] ) -> Union[str, Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__lowercase , **__lowercase )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
__UpperCAmelCase : Dict = kwargs.pop("""raw_speech""" )
else:
__UpperCAmelCase : Dict = kwargs.pop("""audio""" , __lowercase )
__UpperCAmelCase : List[str] = kwargs.pop("""sampling_rate""" , __lowercase )
__UpperCAmelCase : Tuple = kwargs.pop("""text""" , __lowercase )
if len(__lowercase ) > 0:
__UpperCAmelCase : Tuple = args[0]
__UpperCAmelCase : str = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
__UpperCAmelCase : Tuple = self.feature_extractor(__lowercase , *__lowercase , sampling_rate=__lowercase , **__lowercase )
if text is not None:
__UpperCAmelCase : str = self.tokenizer(__lowercase , **__lowercase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__UpperCAmelCase : Dict = encodings["""input_ids"""]
return inputs
def UpperCAmelCase ( self : Optional[Any] , *__lowercase : List[Any] , **__lowercase : int ) -> List[Any]:
return self.tokenizer.batch_decode(*__lowercase , **__lowercase )
def UpperCAmelCase ( self : Optional[int] , *__lowercase : Optional[Any] , **__lowercase : List[str] ) -> int:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*__lowercase , **__lowercase )
__UpperCAmelCase : Optional[int] = kwargs.pop("""input_features""" , __lowercase )
__UpperCAmelCase : Optional[Any] = kwargs.pop("""labels""" , __lowercase )
if len(__lowercase ) > 0:
__UpperCAmelCase : Union[str, Any] = args[0]
__UpperCAmelCase : str = args[1:]
if input_features is not None:
__UpperCAmelCase : Any = self.feature_extractor.pad(__lowercase , *__lowercase , **__lowercase )
if labels is not None:
__UpperCAmelCase : Union[str, Any] = self.tokenizer.pad(__lowercase , **__lowercase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
__UpperCAmelCase : Any = labels["""input_ids"""]
return input_features
def UpperCAmelCase ( self : Any , *__lowercase : Union[str, Any] , **__lowercase : Dict ) -> List[Any]:
return self.tokenizer.decode(*__lowercase , **__lowercase )
@contextmanager
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
__UpperCAmelCase : Any = True
__UpperCAmelCase : Optional[int] = self.tokenizer
yield
__UpperCAmelCase : List[Any] = self.feature_extractor
__UpperCAmelCase : int = False
| 114 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def _lowerCamelCase( lowercase__ ) -> List[str]:
'''simple docstring'''
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
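# Editor's example (assumed 2 processes): the helper above yields
# tensor([1., 2.]) on rank 0 and tensor([3., 4.]) on rank 1, so gather()
# concatenates them into [1, 2, 3, 4] as the test below expects.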
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
__lowercase= create_tensor(lowercase__ )
__lowercase= gather(lowercase__ )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
__lowercase= [state.process_index]
__lowercase= gather_object(lowercase__ )
assert len(lowercase__ ) == state.num_processes, F'{gathered_obj}, {len(lowercase__ )} != {state.num_processes}'
assert gathered_obj == list(range(state.num_processes ) ), F'{gathered_obj} != {list(range(state.num_processes ) )}'
def _lowerCamelCase( lowercase__ ) -> List[str]:
'''simple docstring'''
__lowercase= create_tensor(lowercase__ )
__lowercase= broadcast(lowercase__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def _lowerCamelCase( lowercase__ ) -> List[Any]:
'''simple docstring'''
if state.is_main_process:
__lowercase= torch.arange(state.num_processes + 1 ).to(state.device )
else:
__lowercase= torch.arange(state.num_processes ).to(state.device )
__lowercase= pad_across_processes(lowercase__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def _lowerCamelCase( lowercase__ ) -> Any:
'''simple docstring'''
if state.num_processes != 2:
return
__lowercase= create_tensor(lowercase__ )
__lowercase= reduce(lowercase__ , 'sum' )
__lowercase= torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(lowercase__ , lowercase__ ), F'{reduced_tensor} != {truth_tensor}'
def _lowerCamelCase( lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
if state.num_processes != 2:
return
__lowercase= create_tensor(lowercase__ )
__lowercase= reduce(lowercase__ , 'mean' )
__lowercase= torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(lowercase__ , lowercase__ ), F'{reduced_tensor} != {truth_tensor}'
def _lowerCamelCase( lowercase__ ) -> List[str]:
'''simple docstring'''
main()
def _lowerCamelCase( ) -> List[str]:
'''simple docstring'''
__lowercase= PartialState()
state.print(F'State: {state}' )
state.print('testing gather' )
test_gather(lowercase__ )
state.print('testing gather_object' )
test_gather_object(lowercase__ )
state.print('testing broadcast' )
test_broadcast(lowercase__ )
state.print('testing pad_across_processes' )
test_pad_across_processes(lowercase__ )
state.print('testing reduce_sum' )
test_reduce_sum(lowercase__ )
state.print('testing reduce_mean' )
test_reduce_mean(lowercase__ )
if __name__ == "__main__":
main()
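# The checks above only exercise multiple ranks under a distributed launcher; a
# minimal way to spawn them in-process (2 CPU workers) is accelerate's
# notebook_launcher, e.g.:
#
#   from accelerate import notebook_launcher
#   notebook_launcher(main, num_processes=2)
#
# or, from the shell: `accelerate launch --num_processes 2 <this_file>.py`.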
| 304 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """The variance exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
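# A minimal predictor-corrector sampling sketch wired to the scheduler above.
# The random `score_model` is a dummy stand-in (an assumption, not a trained
# network predicting grad_x log p_t(x)); shapes and step counts are illustrative.
def score_model(x, t):
    return torch.randn_like(x)  # dummy score network, assumption only


scheduler = ScoreSdeVeScheduler()
scheduler.set_timesteps(num_inference_steps=50)
scheduler.set_sigmas(num_inference_steps=50)
sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    # one (or more) corrector steps, then one predictor step per timestep
    for _ in range(scheduler.config.correct_steps):
        sample = scheduler.step_correct(score_model(sample, t), sample).prev_sample
    sample = scheduler.step_pred(score_model(sample, t), t, sample).prev_sample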
| 304 | 1 |
import qiskit


def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """
    Build a GHZ-style entangled state on `qubits` qubits and return the
    measurement counts from 1000 shots on the Aer simulator.
    """
    classical_bits = qubits

    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate to entangle each qubit with the previous one
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits.
    # Now measuring any one qubit would affect other qubits to collapse
    # their superposition and have the same state as the measured one.
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {quantum_entanglement(3)}")
| 215 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
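        # Note: the three expected tensors map to Jukebox's three prior levels;
        # only the top-level prior is conditioned on the full lyrics, which is
        # why the second and third tensors hold just the short metadata prefix.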
| 180 | 0 |
def binary_or(a: int, b: int) -> str:
    """
    Return the bitwise OR of two non-negative integers as a binary string.

    >>> binary_or(25, 32)
    '0b111001'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
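# Worked examples (the first is also exercised by the doctest above):
#   binary_or(25, 32) -> '0b111001'   (11001 | 100000)
#   binary_or(4, 7)   -> '0b111'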
if __name__ == "__main__":
import doctest
doctest.testmod() | 135 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

SPIECE_UNDERLINE = "▁"
class T5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs=None,
        legacy=True,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids):
        """Do not add eos again if the user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs):
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 135 | 1 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """
    Evaluates the functional correctness of a completion by running the test
    suite provided in the problem, inside a separate guarded process.
    """
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """
    Disables destructive functions and prevents the generated code from
    interfering with the test (e.g. fork bombs, killing other processes,
    removing filesystem files). This is NOT a security sandbox.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()
    # The attribute list below mirrors OpenAI's human-eval release, from which
    # this file says it is adapted; each assignment disables a destructive or
    # escape-capable function for the duration of the subprocess.
    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
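# A minimal sketch of how the pieces above fit together. The `problem` dict and
# candidate `completion` are hypothetical inputs shaped like HumanEval records;
# the main guard also keeps multiprocessing happy on spawn-based platforms:
if __name__ == "__main__":
    problem = {"task_id": "HumanEval/0", "prompt": "def add(a, b):\n", "test": "assert add(1, 2) == 3\n"}
    completion = "    return a + b\n"
    check_program = problem["prompt"] + completion + problem["test"]
    print(check_correctness(check_program, timeout=3.0, task_id=problem["task_id"], completion_id=0))
    # -> {'task_id': 'HumanEval/0', 'passed': True, 'result': 'passed', 'completion_id': 0}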
| 85 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
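# With the lazy structure above, importing the package is cheap: torch-backed
# symbols are only resolved on first attribute access. A sketch of the effect
# (assuming transformers is installed with torch):
#
#   from transformers import CpmAntConfig  # no modeling code imported yet
#   from transformers import CpmAntModel   # this is what pulls in modeling_cpmant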
| 85 | 1 |
import argparse
import os
import re
import packaging.version
lowerCAmelCase : Optional[int] = """examples/"""
lowerCAmelCase : int = {
"""examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""),
"""doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
lowerCAmelCase : Any = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
lowerCAmelCase : Dict = """README.md"""
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
with open(SCREAMING_SNAKE_CASE_ , "r" , encoding="utf-8" , newline="\n" ) as f:
SCREAMING_SNAKE_CASE_: List[Any] = f.read()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = REPLACE_PATTERNS[pattern]
SCREAMING_SNAKE_CASE_: Dict = replace.replace("VERSION" , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_: Any = re_pattern.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
def A_ ( _UpperCAmelCase ):
for folder, directories, fnames in os.walk(SCREAMING_SNAKE_CASE_ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , pattern="examples" )
def A_ ( _UpperCAmelCase , _UpperCAmelCase=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if not patch:
update_version_in_examples(SCREAMING_SNAKE_CASE_ )
def A_ ( ):
SCREAMING_SNAKE_CASE_: Tuple = "🤗 Transformers currently provides the following architectures"
SCREAMING_SNAKE_CASE_: Optional[Any] = "1. Want to contribute a new model?"
with open(SCREAMING_SNAKE_CASE_ , "r" , encoding="utf-8" , newline="\n" ) as f:
SCREAMING_SNAKE_CASE_: Tuple = f.readlines()
# Find the start of the list.
SCREAMING_SNAKE_CASE_: Optional[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
SCREAMING_SNAKE_CASE_: Any = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
SCREAMING_SNAKE_CASE_: Optional[Any] = lines[index].replace(
"https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , )
index += 1
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(SCREAMING_SNAKE_CASE_ )
def A_ ( ):
with open(REPLACE_FILES["init"] , "r" ) as f:
SCREAMING_SNAKE_CASE_: Any = f.read()
SCREAMING_SNAKE_CASE_: Optional[Any] = REPLACE_PATTERNS["init"][0].search(SCREAMING_SNAKE_CASE_ ).groups()[0]
return packaging.version.parse(SCREAMING_SNAKE_CASE_ )
def A_ ( _UpperCAmelCase=False ):
SCREAMING_SNAKE_CASE_: Any = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
SCREAMING_SNAKE_CASE_: str = default_version.base_version
elif patch:
SCREAMING_SNAKE_CASE_: Tuple = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
SCREAMING_SNAKE_CASE_: Union[str, Any] = f"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
SCREAMING_SNAKE_CASE_: List[Any] = input(f"Which version are you releasing? [{default_version}]" )
if len(SCREAMING_SNAKE_CASE_ ) == 0:
SCREAMING_SNAKE_CASE_: Dict = default_version
print(f"Updating version to {version}." )
global_version_update(SCREAMING_SNAKE_CASE_ , patch=SCREAMING_SNAKE_CASE_ )
if not patch:
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
def A_ ( ):
SCREAMING_SNAKE_CASE_: Any = get_version()
SCREAMING_SNAKE_CASE_: Any = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
SCREAMING_SNAKE_CASE_: str = current_version.base_version
# Check with the user we got that right.
SCREAMING_SNAKE_CASE_: List[Any] = input(f"Which version are we developing now? [{dev_version}]" )
if len(SCREAMING_SNAKE_CASE_ ) == 0:
SCREAMING_SNAKE_CASE_: List[Any] = dev_version
print(f"Updating version to {version}." )
global_version_update(SCREAMING_SNAKE_CASE_ )
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
if __name__ == "__main__":
lowerCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
lowerCAmelCase : Tuple = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
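# Typical invocations of the script above (the utils/release.py location is an
# assumption; adjust to wherever the file lives):
#
#   python utils/release.py                 # minor release: bump the version everywhere
#   python utils/release.py --patch         # patch release: bump the micro version
#   python utils/release.py --post_release  # switch back to a `.dev0` dev version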
| 366 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase : Optional[int] = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Any = ["""YolosFeatureExtractor"""]
lowerCAmelCase : Tuple = ["""YolosImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
"""YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""YolosForObjectDetection""",
"""YolosModel""",
"""YolosPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 127 | 0 |