code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 164 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def _snake_case ( UpperCamelCase : Callable , UpperCamelCase : float , UpperCamelCase : float , UpperCamelCase : float , UpperCamelCase : float ):
UpperCAmelCase : Any = int(np.ceil((x_end - xa) / step_size ) )
UpperCAmelCase : Optional[Any] = np.zeros((n + 1,) )
UpperCAmelCase : Optional[int] = ya
UpperCAmelCase : int = xa
for k in range(UpperCamelCase ):
UpperCAmelCase : Optional[int] = y[k] + step_size * ode_func(UpperCamelCase , y[k] )
UpperCAmelCase : Optional[int] = y[k] + (
(step_size / 2) * (ode_func(UpperCamelCase , y[k] ) + ode_func(x + step_size , UpperCamelCase ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 109 | 0 |
from typing import Any
class _UpperCamelCase :
def __init__( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Any ) -> str:
"""simple docstring"""
UpperCamelCase_ = data
UpperCamelCase_ = None
def __repr__( self: int ) -> str:
"""simple docstring"""
return f'''Node({self.data})'''
class _UpperCamelCase :
def __init__( self: Optional[Any] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = None
def __iter__( self: Optional[Any] ) -> Any:
"""simple docstring"""
UpperCamelCase_ = self.head
while node:
yield node.data
UpperCamelCase_ = node.next
def __len__( self: List[Any] ) -> int:
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self: str ) -> str:
"""simple docstring"""
return "->".join([str(_SCREAMING_SNAKE_CASE ) for item in self] )
def __getitem__( self: Any , _SCREAMING_SNAKE_CASE: int ) -> Any:
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Any ) -> None:
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
UpperCamelCase_ = self.head
for _ in range(_SCREAMING_SNAKE_CASE ):
UpperCamelCase_ = current.next
UpperCamelCase_ = data
def lowercase ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Any ) -> None:
"""simple docstring"""
self.insert_nth(len(self ) , _SCREAMING_SNAKE_CASE )
def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: Any ) -> None:
"""simple docstring"""
self.insert_nth(0 , _SCREAMING_SNAKE_CASE )
def lowercase ( self: List[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Any ) -> None:
"""simple docstring"""
if not 0 <= index <= len(self ):
raise IndexError("list index out of range" )
UpperCamelCase_ = Node(_SCREAMING_SNAKE_CASE )
if self.head is None:
UpperCamelCase_ = new_node
elif index == 0:
UpperCamelCase_ = self.head # link new_node to head
UpperCamelCase_ = new_node
else:
UpperCamelCase_ = self.head
for _ in range(index - 1 ):
UpperCamelCase_ = temp.next
UpperCamelCase_ = temp.next
UpperCamelCase_ = new_node
def lowercase ( self: Tuple ) -> None: # print every node data
"""simple docstring"""
print(self )
def lowercase ( self: List[str] ) -> Any:
"""simple docstring"""
return self.delete_nth(0 )
def lowercase ( self: List[str] ) -> Any: # delete from tail
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def lowercase ( self: Optional[int] , _SCREAMING_SNAKE_CASE: int = 0 ) -> Any:
"""simple docstring"""
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("List index out of range." )
UpperCamelCase_ = self.head # default first node
if index == 0:
UpperCamelCase_ = self.head.next
else:
UpperCamelCase_ = self.head
for _ in range(index - 1 ):
UpperCamelCase_ = temp.next
UpperCamelCase_ = temp.next
UpperCamelCase_ = temp.next.next
return delete_node.data
def lowercase ( self: Optional[Any] ) -> bool:
"""simple docstring"""
return self.head is None
def lowercase ( self: List[str] ) -> None:
"""simple docstring"""
UpperCamelCase_ = None
UpperCamelCase_ = self.head
while current:
# Store the current node's next node.
UpperCamelCase_ = current.next
# Make the current node's next point backwards
UpperCamelCase_ = prev
# Make the previous node be the current node
UpperCamelCase_ = current
# Make the current node the next node (to progress iteration)
UpperCamelCase_ = next_node
# Return prev in order to put the head at the end
UpperCamelCase_ = prev
def lowerCAmelCase_ ( ) -> None:
UpperCamelCase_ = LinkedList()
assert linked_list.is_empty() is True
assert str(UpperCamelCase_ ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(UpperCamelCase_ ) == i
linked_list.insert_nth(UpperCamelCase_ , i + 1 )
assert str(UpperCamelCase_ ) == "->".join(str(UpperCamelCase_ ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(UpperCamelCase_ ) == "->".join(str(UpperCamelCase_ ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(UpperCamelCase_ ) == 9
assert str(UpperCamelCase_ ) == "->".join(str(UpperCamelCase_ ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
UpperCamelCase_ = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(UpperCamelCase_ ) == "->".join(str(UpperCamelCase_ ) for i in range(-8 , 1 ) )
def lowerCAmelCase_ ( ) -> None:
UpperCamelCase_ = [
-9,
100,
Node(77345112 ),
"dlrow olleH",
7,
5555,
0,
-1_92.5_55_55,
"Hello, world!",
77.9,
Node(10 ),
None,
None,
12.20,
]
UpperCamelCase_ = LinkedList()
for i in test_input:
linked_list.insert_tail(UpperCamelCase_ )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(UpperCamelCase_ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
UpperCamelCase_ = linked_list.delete_head()
assert result == -9
assert (
str(UpperCamelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
UpperCamelCase_ = linked_list.delete_tail()
assert result == 12.2
assert (
str(UpperCamelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
UpperCamelCase_ = linked_list.delete_nth(10 )
assert result is None
assert (
str(UpperCamelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("Hello again, world!" ) )
assert (
str(UpperCamelCase_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(UpperCamelCase_ )
assert (
str(UpperCamelCase_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(UpperCamelCase_ )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def lowerCAmelCase_ ( ) -> Any:
from doctest import testmod
testmod()
UpperCamelCase_ = LinkedList()
linked_list.insert_head(input("Inserting 1st at head " ).strip() )
linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
print("\nDelete head" )
linked_list.delete_head()
print("Delete tail" )
linked_list.delete_tail()
print("\nPrint list:" )
linked_list.print_list()
print("\nReverse linked list" )
linked_list.reverse()
print("\nPrint list:" )
linked_list.print_list()
print("\nString representation of linked list:" )
print(UpperCamelCase_ )
print("\nReading/changing Node data using indexing:" )
print(F'''Element at Position 1: {linked_list[1]}''' )
UpperCamelCase_ = input("Enter New Value: " ).strip()
print("New list:" )
print(UpperCamelCase_ )
print(F'''length of linked_list is : {len(UpperCamelCase_ )}''' )
if __name__ == "__main__":
main()
| 328 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCAmelCase = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 328 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Tuple = 'timm_backbone'
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : Dict = backbone
lowercase : List[Any] = num_channels
lowercase : List[Any] = features_only
lowercase : str = use_pretrained_backbone
lowercase : str = True
lowercase : List[str] = out_indices if out_indices is not None else (-1,)
| 337 |
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = [False] * len(_UpperCamelCase )
lowercase : Optional[int] = []
queue.append(_UpperCamelCase )
lowercase : Union[str, Any] = True
while queue:
lowercase : List[str] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_UpperCamelCase )
lowercase : Tuple = True
lowercase : Optional[Any] = u
return visited[t]
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->List[str]:
"""simple docstring"""
lowercase : List[str] = [-1] * (len(_UpperCamelCase ))
lowercase : int = 0
while bfs(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ):
lowercase : List[str] = float('''Inf''' )
lowercase : int = sink
while s != source:
# Find the minimum value in select path
lowercase : List[Any] = min(_UpperCamelCase, graph[parent[s]][s] )
lowercase : Union[str, Any] = parent[s]
max_flow += path_flow
lowercase : Optional[int] = sink
while v != source:
lowercase : Any = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowercase : Union[str, Any] = parent[v]
return max_flow
__a = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
__a , __a = 0, 5
print(ford_fulkerson(graph, source, sink))
| 337 | 1 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = None
lowerCAmelCase_ : List[str] = False
lowerCAmelCase_ : List[str] = False
lowerCAmelCase_ : Optional[Any] = False
lowerCAmelCase_ : int = None
lowerCAmelCase_ : Optional[Any] = None
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : List[str] = False
lowerCAmelCase_ : Tuple = False
lowerCAmelCase_ : Dict = True
lowerCAmelCase_ : Tuple = None
lowerCAmelCase_ : int = 1
lowerCAmelCase_ : Union[str, Any] = None
lowerCAmelCase_ : int = False
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : Optional[int] = None
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
return self.__class__(**{k: copy.deepcopy(lowercase_ ) for k, v in self.__dict__.items()} )
| 371 |
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'nielsr/canine-s': 2_0_4_8,
}
# Unicode defines 1,114,112 total “codepoints”
UpperCAmelCase_ = 1_1_1_4_1_1_2
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0Xe000
UpperCAmelCase_ = 0Xe001
UpperCAmelCase_ = 0Xe002
UpperCAmelCase_ = 0Xe003
UpperCAmelCase_ = 0Xe004
# Maps special codepoints to human-readable names.
UpperCAmelCase_ = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
UpperCAmelCase_ = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Tuple , _UpperCAmelCase : Dict=chr(_UpperCAmelCase ) , _UpperCAmelCase : int=chr(_UpperCAmelCase ) , _UpperCAmelCase : List[Any]=chr(_UpperCAmelCase ) , _UpperCAmelCase : Any=chr(_UpperCAmelCase ) , _UpperCAmelCase : Dict=chr(_UpperCAmelCase ) , _UpperCAmelCase : List[Any]=chr(_UpperCAmelCase ) , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Dict=20_48 , **_UpperCAmelCase : Union[str, Any] , ):
"""simple docstring"""
UpperCAmelCase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else bos_token
UpperCAmelCase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else eos_token
UpperCAmelCase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else sep_token
UpperCAmelCase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else cls_token
UpperCAmelCase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , model_max_length=_UpperCAmelCase , **_UpperCAmelCase , )
# Creates a mapping for looking up the IDs of special symbols.
UpperCAmelCase__ = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
UpperCAmelCase__ = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
UpperCAmelCase__ = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
UpperCAmelCase__ = UNICODE_VOCAB_SIZE
UpperCAmelCase__ = len(self._special_codepoints )
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
return self._unicode_vocab_size
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : str ):
"""simple docstring"""
return list(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : str ):
"""simple docstring"""
try:
return ord(_UpperCAmelCase )
except TypeError:
raise ValueError(f'''invalid token: \'{token}\'''' )
def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : int ):
"""simple docstring"""
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(_UpperCAmelCase )
except TypeError:
raise ValueError(f'''invalid id: {index}''' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : List[Any] ):
"""simple docstring"""
return "".join(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
UpperCAmelCase__ = [self.sep_token_id]
UpperCAmelCase__ = [self.cls_token_id]
UpperCAmelCase__ = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
UpperCAmelCase__ = [1] + ([0] * len(_UpperCAmelCase )) + [1]
if token_ids_a is not None:
result += ([0] * len(_UpperCAmelCase )) + [1]
return result
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
UpperCAmelCase__ = [self.sep_token_id]
UpperCAmelCase__ = [self.cls_token_id]
UpperCAmelCase__ = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ):
"""simple docstring"""
return ()
| 61 | 0 |
"""simple docstring"""
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class _lowerCamelCase ( unittest.TestCase ):
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Optional[Any] ) -> int:
"""simple docstring"""
return f"""gaussian_noise_s={seed}_shape={'_'.join([str(UpperCamelCase ) for s in shape] )}.npy"""
def _lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _lowerCAmelCase ( self : List[str] , UpperCamelCase : Optional[Any]=0 , UpperCamelCase : List[Any]=(4, 4, 64, 64) , UpperCamelCase : str=False ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa
lowerCAmelCase__ : Any = jnp.array(load_hf_numpy(self.get_file_format(UpperCamelCase , UpperCamelCase ) ) , dtype=UpperCamelCase )
return image
def _lowerCAmelCase ( self : List[str] , UpperCamelCase : Tuple=False , UpperCamelCase : str="CompVis/stable-diffusion-v1-4" ) -> int:
"""simple docstring"""
lowerCAmelCase__ : int = jnp.bfloataa if fpaa else jnp.floataa
lowerCAmelCase__ : Tuple = """bf16""" if fpaa else None
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = FlaxUNetaDConditionModel.from_pretrained(
UpperCamelCase , subfolder="""unet""" , dtype=UpperCamelCase , revision=UpperCamelCase )
return model, params
def _lowerCAmelCase ( self : Any , UpperCamelCase : List[Any]=0 , UpperCamelCase : str=(4, 77, 7_68) , UpperCamelCase : List[Any]=False ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
lowerCAmelCase__ : Optional[int] = jnp.array(load_hf_numpy(self.get_file_format(UpperCamelCase , UpperCamelCase ) ) , dtype=UpperCamelCase )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 10_00, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def _lowerCAmelCase ( self : str , UpperCamelCase : int , UpperCamelCase : Optional[Any] , UpperCamelCase : str ) -> str:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""" , fpaa=UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = self.get_latents(UpperCamelCase , fpaa=UpperCamelCase )
lowerCAmelCase__ : Optional[int] = self.get_encoder_hidden_states(UpperCamelCase , fpaa=UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = model.apply(
{"""params""": params} , UpperCamelCase , jnp.array(UpperCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=UpperCamelCase , ).sample
assert sample.shape == latents.shape
lowerCAmelCase__ : Optional[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
lowerCAmelCase__ : Union[str, Any] = jnp.array(UpperCamelCase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(UpperCamelCase , UpperCamelCase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 10_00, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : int , UpperCamelCase : Optional[int] , UpperCamelCase : int ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""" , fpaa=UpperCamelCase )
lowerCAmelCase__ : List[Any] = self.get_latents(UpperCamelCase , shape=(4, 4, 96, 96) , fpaa=UpperCamelCase )
lowerCAmelCase__ : List[Any] = self.get_encoder_hidden_states(UpperCamelCase , shape=(4, 77, 10_24) , fpaa=UpperCamelCase )
lowerCAmelCase__ : List[str] = model.apply(
{"""params""": params} , UpperCamelCase , jnp.array(UpperCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=UpperCamelCase , ).sample
assert sample.shape == latents.shape
lowerCAmelCase__ : str = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
lowerCAmelCase__ : Optional[Any] = jnp.array(UpperCamelCase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(UpperCamelCase , UpperCamelCase , atol=1E-2 )
| 242 |
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def lowercase_ ( __UpperCAmelCase ) -> tuple:
return (data["data"], data["target"])
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
lowerCAmelCase__ : List[Any] = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(__UpperCAmelCase , __UpperCAmelCase )
# Predict target for test data
lowerCAmelCase__ : Dict = xgb.predict(__UpperCAmelCase )
lowerCAmelCase__ : Any = predictions.reshape(len(__UpperCAmelCase ) , 1 )
return predictions
def lowercase_ ( ) -> None:
lowerCAmelCase__ : Optional[Any] = fetch_california_housing()
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = data_handling(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = train_test_split(
__UpperCAmelCase , __UpperCAmelCase , test_size=0.25 , random_state=1 )
lowerCAmelCase__ : Optional[Any] = xgboost(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Error printing
print(f"""Mean Absolute Error : {mean_absolute_error(__UpperCAmelCase , __UpperCAmelCase )}""" )
print(f"""Mean Square Error : {mean_squared_error(__UpperCAmelCase , __UpperCAmelCase )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 242 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__SCREAMING_SNAKE_CASE :str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE :List[str] = {
'''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ ):
_lowerCamelCase : str = """focalnet"""
def __init__( self : List[Any] , snake_case_ : str=2_2_4 , snake_case_ : int=4 , snake_case_ : Tuple=3 , snake_case_ : Optional[int]=9_6 , snake_case_ : Optional[Any]=False , snake_case_ : Tuple=[1_9_2, 3_8_4, 7_6_8, 7_6_8] , snake_case_ : Tuple=[2, 2, 6, 2] , snake_case_ : str=[2, 2, 2, 2] , snake_case_ : List[Any]=[3, 3, 3, 3] , snake_case_ : str="gelu" , snake_case_ : List[str]=4.0 , snake_case_ : int=0.0 , snake_case_ : List[str]=0.1 , snake_case_ : Tuple=False , snake_case_ : str=1e-4 , snake_case_ : Optional[Any]=False , snake_case_ : Union[str, Any]=False , snake_case_ : List[str]=False , snake_case_ : str=0.0_2 , snake_case_ : Union[str, Any]=1e-5 , snake_case_ : str=3_2 , snake_case_ : Any=None , snake_case_ : Optional[Any]=None , **snake_case_ : Optional[Any] , ):
super().__init__(**snake_case_ )
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = use_conv_embed
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = focal_levels
_UpperCAmelCase = focal_windows
_UpperCAmelCase = hidden_act
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = use_layerscale
_UpperCAmelCase = layerscale_value
_UpperCAmelCase = use_post_layernorm
_UpperCAmelCase = use_post_layernorm_in_modulation
_UpperCAmelCase = normalize_modulator
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = encoder_stride
_UpperCAmelCase = ["stem"] + [f'stage{idx}' for idx in range(1 , len(self.depths ) + 1 )]
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=snake_case_ , out_indices=snake_case_ , stage_names=self.stage_names )
| 365 |
'''simple docstring'''
def UpperCAmelCase_ ( __lowercase : list ) -> list:
'''simple docstring'''
for i in range(len(__lowercase ) - 1 , 0 , -1 ):
_UpperCAmelCase = False
for j in range(__lowercase , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
_UpperCAmelCase , _UpperCAmelCase = unsorted[j - 1], unsorted[j]
_UpperCAmelCase = True
for j in range(__lowercase ):
if unsorted[j] > unsorted[j + 1]:
_UpperCAmelCase , _UpperCAmelCase = unsorted[j + 1], unsorted[j]
_UpperCAmelCase = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE :List[str] = input('''Enter numbers separated by a comma:\n''').strip()
__SCREAMING_SNAKE_CASE :Any = [int(item) for item in user_input.split(''',''')]
print(F"{cocktail_shaker_sort(unsorted) = }")
| 156 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class A ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ = StableDiffusionPanoramaPipeline
A__ = TEXT_TO_IMAGE_PARAMS
A__ = TEXT_TO_IMAGE_BATCH_PARAMS
A__ = TEXT_TO_IMAGE_IMAGE_PARAMS
A__ = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase__ (self : List[Any] ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
lowercase__ = DDIMScheduler()
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase__ = CLIPTextModel(A_ )
lowercase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase__ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any]=0 ) -> int:
"""simple docstring"""
lowercase__ = torch.manual_seed(A_ )
lowercase__ = {
"prompt": "a photo of the dolomites",
"generator": generator,
# Setting height and width to None to prevent OOMs on CPU.
"height": None,
"width": None,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def lowerCamelCase__ (self : str ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionPanoramaPipeline(**A_ )
lowercase__ = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
lowercase__ = self.get_dummy_inputs(A_ )
lowercase__ = sd_pipe(**A_ ).images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__ = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ (self : List[str] ) -> List[str]:
"""simple docstring"""
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase__ (self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5E-3 )
def lowerCamelCase__ (self : Dict ) -> List[Any]:
"""simple docstring"""
lowercase__ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionPanoramaPipeline(**A_ )
lowercase__ = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
lowercase__ = self.get_dummy_inputs(A_ )
lowercase__ = "french fries"
lowercase__ = sd_pipe(**A_ , negative_prompt=A_ )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__ = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ (self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionPanoramaPipeline(**A_ )
lowercase__ = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
lowercase__ = self.get_dummy_inputs(A_ )
lowercase__ = sd_pipe(**A_ , view_batch_size=2 )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__ = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ (self : Any ) -> Optional[int]:
"""simple docstring"""
lowercase__ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
lowercase__ = StableDiffusionPanoramaPipeline(**A_ )
lowercase__ = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
lowercase__ = self.get_dummy_inputs(A_ )
lowercase__ = sd_pipe(**A_ ).images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__ = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ (self : Optional[int] ) -> Any:
"""simple docstring"""
lowercase__ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = PNDMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=A_ )
lowercase__ = StableDiffusionPanoramaPipeline(**A_ )
lowercase__ = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
lowercase__ = self.get_dummy_inputs(A_ )
lowercase__ = sd_pipe(**A_ ).images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__ = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ (self : Optional[int] ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ (self : List[Any] , _UpperCAmelCase : str=0 ) -> Optional[int]:
"""simple docstring"""
lowercase__ = torch.manual_seed(A_ )
lowercase__ = {
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowerCamelCase__ (self : Any ) -> Optional[int]:
"""simple docstring"""
lowercase__ = "stabilityai/stable-diffusion-2-base"
lowercase__ = DDIMScheduler.from_pretrained(A_ , subfolder="""scheduler""" )
lowercase__ = StableDiffusionPanoramaPipeline.from_pretrained(A_ , scheduler=A_ , safety_checker=A_ )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
lowercase__ = self.get_inputs()
lowercase__ = pipe(**A_ ).images
lowercase__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase__ = np.array(
[
0.36_968_392,
0.27_025_372,
0.32_446_766,
0.28_379_387,
0.36_363_274,
0.30_733_347,
0.27_100_027,
0.27_054_125,
0.25_536_096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def lowerCamelCase__ (self : Any ) -> List[str]:
"""simple docstring"""
lowercase__ = StableDiffusionPanoramaPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-base""" , safety_checker=A_ )
lowercase__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
lowercase__ = self.get_inputs()
lowercase__ = pipe(**A_ ).images
lowercase__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase__ = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCamelCase__ (self : Optional[int] ) -> int:
"""simple docstring"""
lowercase__ = 0
def callback_fn(_UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] ) -> None:
lowercase__ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase__ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase__ = latents[0, -3:, -3:, -1]
lowercase__ = np.array(
[
0.18_681_869,
0.33_907_816,
0.5_361_276,
0.14_432_865,
-0.02_856_611,
-0.73_941_123,
0.23_397_987,
0.47_322_682,
-0.37_823_164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
lowercase__ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase__ = latents[0, -3:, -3:, -1]
lowercase__ = np.array(
[
0.18_539_645,
0.33_987_248,
0.5_378_559,
0.14_437_142,
-0.02_455_261,
-0.7_338_317,
0.23_990_755,
0.47_356_272,
-0.3_786_505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
lowercase__ = False
lowercase__ = "stabilityai/stable-diffusion-2-base"
lowercase__ = DDIMScheduler.from_pretrained(A_ , subfolder="""scheduler""" )
lowercase__ = StableDiffusionPanoramaPipeline.from_pretrained(A_ , scheduler=A_ , safety_checker=A_ )
lowercase__ = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
lowercase__ = self.get_inputs()
pipe(**A_ , callback=A_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowerCamelCase__ (self : int ) -> Tuple:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__ = "stabilityai/stable-diffusion-2-base"
lowercase__ = DDIMScheduler.from_pretrained(A_ , subfolder="""scheduler""" )
lowercase__ = StableDiffusionPanoramaPipeline.from_pretrained(A_ , scheduler=A_ , safety_checker=A_ )
lowercase__ = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase__ = self.get_inputs()
lowercase__ = pipe(**A_ )
lowercase__ = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 305 |
def A_ ( _lowerCAmelCase ) -> str:
UpperCamelCase : Optional[int] = int(_lowerCAmelCase )
if decimal in (0, 1): # Exit cases for the recursion
return str(_lowerCAmelCase )
UpperCamelCase , UpperCamelCase : Dict = divmod(_lowerCAmelCase , 2 )
return binary_recursive(_lowerCAmelCase ) + str(_lowerCAmelCase )
def A_ ( _lowerCAmelCase ) -> str:
UpperCamelCase : Tuple = str(_lowerCAmelCase ).strip()
if not number:
raise ValueError("No input value was provided" )
UpperCamelCase : Optional[int] = "-" if number.startswith("-" ) else ""
UpperCamelCase : Any = number.lstrip("-" )
if not number.isnumeric():
raise ValueError("Input value is not an integer" )
return F"""{negative}0b{binary_recursive(int(_lowerCAmelCase ) )}"""
if __name__ == "__main__":
from doctest import testmod
testmod()
| 52 | 0 |
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: list ) -> list:
_UpperCAmelCase : Dict = len(lowerCAmelCase )
for _ in range(lowerCAmelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
_UpperCAmelCase , _UpperCAmelCase : Tuple = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = list(range(10, 0, -1))
print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
| 189 |
from typing import Any
class a :
def __init__( self , A_ ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = data
_UpperCAmelCase : Any = None
class a :
def __init__( self ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = None
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : str = self.head
while temp is not None:
print(temp.data , end=" " )
_UpperCAmelCase : str = temp.next
print()
def _UpperCAmelCase ( self , A_ ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = Node(A_ )
_UpperCAmelCase : Tuple = self.head
_UpperCAmelCase : Tuple = new_node
def _UpperCAmelCase ( self , A_ , A_ ):
'''simple docstring'''
if node_data_a == node_data_a:
return
else:
_UpperCAmelCase : int = self.head
while node_a is not None and node_a.data != node_data_a:
_UpperCAmelCase : Tuple = node_a.next
_UpperCAmelCase : Dict = self.head
while node_a is not None and node_a.data != node_data_a:
_UpperCAmelCase : List[Any] = node_a.next
if node_a is None or node_a is None:
return
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = node_a.data, node_a.data
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('After swapping')
ll.print_list()
| 189 | 1 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
a : Any = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('importlib_metadata')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def lowerCAmelCase_ (lowerCAmelCase__: Dict , lowerCAmelCase__: List[str]=None ):
"""simple docstring"""
require_version(deps[pkg] , _lowerCAmelCase )
| 147 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__a = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 42
__UpperCamelCase = 42
def a__ ( __lowercase ) -> list[str]:
if not isinstance(__lowercase , __lowercase ):
raise TypeError("The parameter s type must be str." )
return [s[i:] + s[:i] for i in range(len(__lowercase ) )]
def a__ ( __lowercase ) -> BWTTransformDict:
if not isinstance(__lowercase , __lowercase ):
raise TypeError("The parameter s type must be str." )
if not s:
raise ValueError("The parameter s must not be empty." )
_A = all_rotations(__lowercase )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
_A = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(__lowercase ),
}
return response
def a__ ( __lowercase , __lowercase ) -> str:
if not isinstance(__lowercase , __lowercase ):
raise TypeError("The parameter bwt_string type must be str." )
if not bwt_string:
raise ValueError("The parameter bwt_string must not be empty." )
try:
_A = int(__lowercase )
except ValueError:
raise TypeError(
"The parameter idx_original_string type must be int or passive"
" of cast to int." )
if idx_original_string < 0:
raise ValueError("The parameter idx_original_string must not be lower than 0." )
if idx_original_string >= len(__lowercase ):
raise ValueError(
"The parameter idx_original_string must be lower than" " len(bwt_string)." )
_A = [""] * len(__lowercase )
for _ in range(len(__lowercase ) ):
for i in range(len(__lowercase ) ):
_A = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
a_ = "Provide a string that I will generate its BWT transform: "
a_ = input(entry_msg).strip()
a_ = bwt_transform(s)
print(
f'''Burrows Wheeler transform for string \'{s}\' results '''
f'''in \'{result["bwt_string"]}\''''
)
a_ = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
f'''Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '''
f'''we get original string \'{original_string}\''''
) | 163 |
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def a__ ( __lowercase ) -> Any:
random.seed(__lowercase )
np.random.seed(__lowercase )
torch.manual_seed(__lowercase )
torch.cuda.manual_seed_all(__lowercase )
# ^^ safe to call this function even if cuda is not available
class snake_case :
def __init__( self : str , a__ : Iterable[torch.nn.Parameter] , a__ : float = 0.9_9_9_9 , a__ : float = 0.0 , a__ : int = 0 , a__ : bool = False , a__ : Union[float, int] = 1.0 , a__ : Union[float, int] = 2 / 3 , a__ : Optional[Any] = None , a__ : Dict[str, Any] = None , **a__ : Optional[Any] , ) -> Optional[Any]:
'''simple docstring'''
if isinstance(a__ , torch.nn.Module ):
_A = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , a__ , standard_warn=a__ , )
_A = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_A = True
if kwargs.get("max_value" , a__ ) is not None:
_A = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value" , "1.0.0" , a__ , standard_warn=a__ )
_A = kwargs["max_value"]
if kwargs.get("min_value" , a__ ) is not None:
_A = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value" , "1.0.0" , a__ , standard_warn=a__ )
_A = kwargs["min_value"]
_A = list(a__ )
_A = [p.clone().detach() for p in parameters]
if kwargs.get("device" , a__ ) is not None:
_A = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device" , "1.0.0" , a__ , standard_warn=a__ )
self.to(device=kwargs["device"] )
_A = None
_A = decay
_A = min_decay
_A = update_after_step
_A = use_ema_warmup
_A = inv_gamma
_A = power
_A = 0
_A = None # set in `step()`
_A = model_cls
_A = model_config
@classmethod
def a_ ( cls : Dict , a__ : str , a__ : str ) -> "EMAModel":
'''simple docstring'''
_A , _A = model_cls.load_config(a__ , return_unused_kwargs=a__ )
_A = model_cls.from_pretrained(a__ )
_A = cls(model.parameters() , model_cls=a__ , model_config=model.config )
ema_model.load_state_dict(a__ )
return ema_model
def a_ ( self : List[Any] , a__ : List[str] ) -> int:
'''simple docstring'''
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_A = self.model_cls.from_config(self.model_config )
_A = self.state_dict()
state_dict.pop("shadow_params" , a__ )
model.register_to_config(**a__ )
self.copy_to(model.parameters() )
model.save_pretrained(a__ )
def a_ ( self : str , a__ : int ) -> float:
'''simple docstring'''
_A = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_A = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_A = (1 + step) / (10 + step)
_A = min(a__ , self.decay )
# make sure decay is not smaller than min_decay
_A = max(a__ , self.min_decay )
return cur_decay_value
@torch.no_grad()
def a_ ( self : List[Any] , a__ : Iterable[torch.nn.Parameter] ) -> Optional[int]:
'''simple docstring'''
if isinstance(a__ , torch.nn.Module ):
_A = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , a__ , standard_warn=a__ , )
_A = parameters.parameters()
_A = list(a__ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_A = self.get_decay(self.optimization_step )
_A = decay
_A = 1 - decay
_A = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , a__ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_A = deepspeed.zero.GatheredParameters(a__ , modifier_rank=a__ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(a__ )
def a_ ( self : Dict , a__ : Iterable[torch.nn.Parameter] ) -> None:
'''simple docstring'''
_A = list(a__ )
for s_param, param in zip(self.shadow_params , a__ ):
param.data.copy_(s_param.to(param.device ).data )
def a_ ( self : List[str] , a__ : int=None , a__ : List[Any]=None ) -> None:
'''simple docstring'''
_A = [
p.to(device=a__ , dtype=a__ ) if p.is_floating_point() else p.to(device=a__ )
for p in self.shadow_params
]
def a_ ( self : Tuple ) -> dict:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def a_ ( self : Union[str, Any] , a__ : Iterable[torch.nn.Parameter] ) -> None:
'''simple docstring'''
_A = [param.detach().cpu().clone() for param in parameters]
def a_ ( self : Union[str, Any] , a__ : Iterable[torch.nn.Parameter] ) -> None:
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params , a__ ):
param.data.copy_(c_param.data )
# Better memory-wise.
_A = None
def a_ ( self : Optional[Any] , a__ : dict ) -> None:
'''simple docstring'''
_A = copy.deepcopy(a__ )
_A = state_dict.get("decay" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_A = state_dict.get("min_decay" , self.min_decay )
if not isinstance(self.min_decay , a__ ):
raise ValueError("Invalid min_decay" )
_A = state_dict.get("optimization_step" , self.optimization_step )
if not isinstance(self.optimization_step , a__ ):
raise ValueError("Invalid optimization_step" )
_A = state_dict.get("update_after_step" , self.update_after_step )
if not isinstance(self.update_after_step , a__ ):
raise ValueError("Invalid update_after_step" )
_A = state_dict.get("use_ema_warmup" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , a__ ):
raise ValueError("Invalid use_ema_warmup" )
_A = state_dict.get("inv_gamma" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("Invalid inv_gamma" )
_A = state_dict.get("power" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("Invalid power" )
_A = state_dict.get("shadow_params" , a__ )
if shadow_params is not None:
_A = shadow_params
if not isinstance(self.shadow_params , a__ ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(a__ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" ) | 163 | 1 |
import math
class A :
def _A (self , lowerCAmelCase , lowerCAmelCase ):
__lowercase= 0.0
__lowercase= 0.0
for i in range(len(lowerCAmelCase ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
da += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > da else 1
return 0
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
for i in range(len(lowerCAmelCase ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def _lowerCamelCase( ) -> None:
'''simple docstring'''
__lowercase= [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
__lowercase= [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
__lowercase= SelfOrganizingMap()
__lowercase= 3
__lowercase= 0.5
for _ in range(lowercase__ ):
for j in range(len(lowercase__ ) ):
# training sample
__lowercase= training_samples[j]
# Compute the winning vector
__lowercase= self_organizing_map.get_winner(lowercase__ , lowercase__ )
# Update the winning vector
__lowercase= self_organizing_map.update(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# classify test sample
__lowercase= [0, 0, 0, 1]
__lowercase= self_organizing_map.get_winner(lowercase__ , lowercase__ )
# results
print(F'Clusters that the test sample belongs to : {winner}' )
print(F'Weights that have been trained : {weights}' )
# running the main() function
if __name__ == "__main__":
main()
| 295 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class A ( nn.Module ):
def __init__(self ):
super().__init__()
__lowercase= nn.Linear(3 , 4 )
__lowercase= nn.BatchNormad(4 )
__lowercase= nn.Linear(4 , 5 )
def _A (self , lowerCAmelCase ):
return self.lineara(self.batchnorm(self.lineara(lowerCAmelCase ) ) )
class A ( A_ ):
def _A (self , lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase ):
return (args[0] + 1,) + args[1:], kwargs
class A ( A_ ):
def _A (self , lowerCAmelCase , lowerCAmelCase ):
return output + 1
class A ( unittest.TestCase ):
def _A (self ):
__lowercase= ModelForTest()
__lowercase= ModelHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(test_model._hf_hook , lowerCAmelCase )
self.assertTrue(hasattr(lowerCAmelCase , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(lowerCAmelCase )
self.assertFalse(hasattr(lowerCAmelCase , '_hf_hook' ) )
self.assertFalse(hasattr(lowerCAmelCase , '_old_forward' ) )
def _A (self ):
__lowercase= ModelForTest()
__lowercase= ModelHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
add_hook_to_module(lowerCAmelCase , lowerCAmelCase , append=lowerCAmelCase )
self.assertEqual(isinstance(test_model._hf_hook , lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(lowerCAmelCase , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(lowerCAmelCase )
self.assertFalse(hasattr(lowerCAmelCase , '_hf_hook' ) )
self.assertFalse(hasattr(lowerCAmelCase , '_old_forward' ) )
def _A (self ):
__lowercase= ModelForTest()
__lowercase= torch.randn(2 , 3 )
__lowercase= test_model(x + 1 )
__lowercase= test_model(x + 2 )
__lowercase= PreForwardHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
__lowercase= test_model(lowerCAmelCase )
self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__lowercase= PreForwardHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
__lowercase= test_model(lowerCAmelCase )
self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__lowercase= SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
__lowercase= test_model(lowerCAmelCase )
assert torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 )
def _A (self ):
__lowercase= ModelForTest()
__lowercase= torch.randn(2 , 3 )
__lowercase= test_model(lowerCAmelCase )
__lowercase= PostForwardHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
__lowercase= test_model(lowerCAmelCase )
self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__lowercase= PostForwardHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
__lowercase= test_model(lowerCAmelCase )
self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__lowercase= SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
__lowercase= test_model(lowerCAmelCase )
assert torch.allclose(lowerCAmelCase , output + 2 , atol=1E-5 )
def _A (self ):
__lowercase= ModelForTest()
__lowercase= torch.randn(2 , 3 )
__lowercase= test_model(lowerCAmelCase )
__lowercase= PostForwardHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
__lowercase= test_model(lowerCAmelCase )
self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__lowercase= True
__lowercase= test_model(lowerCAmelCase )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def _A (self ):
__lowercase= ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__lowercase= torch.randn(2 , 3 )
__lowercase= model(lowerCAmelCase )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(lowerCAmelCase , AlignDevicesHook(io_same_device=lowerCAmelCase ) )
__lowercase= torch.randn(2 , 3 ).to(0 )
__lowercase= model(lowerCAmelCase )
self.assertEqual(output.device , torch.device(0 ) )
def _A (self ):
__lowercase= ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
__lowercase= {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCAmelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
__lowercase= torch.device(hook_kwargs['execution_device'] )
self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase )
__lowercase= torch.randn(2 , 3 )
__lowercase= model(lowerCAmelCase )
self.assertEqual(output.device , lowerCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
__lowercase= {
'execution_device': 0 if torch.cuda.is_available() else 'cpu',
'offload': True,
'offload_buffers': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCAmelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
__lowercase= torch.randn(2 , 3 )
__lowercase= model(lowerCAmelCase )
self.assertEqual(output.device , lowerCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def _A (self ):
__lowercase= ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
__lowercase= 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
__lowercase= torch.device(lowerCAmelCase )
self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase )
__lowercase= torch.randn(2 , 3 )
__lowercase= model(lowerCAmelCase )
self.assertEqual(output.device , lowerCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCAmelCase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , offload_buffers=lowerCAmelCase )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
__lowercase= torch.randn(2 , 3 )
__lowercase= model(lowerCAmelCase )
self.assertEqual(output.device , lowerCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCAmelCase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def _A (self ):
__lowercase= ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
__lowercase= 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(
lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
__lowercase= torch.device(lowerCAmelCase )
self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase )
__lowercase= torch.randn(2 , 3 )
__lowercase= model(lowerCAmelCase )
self.assertEqual(output.device , lowerCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCAmelCase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , weights_map=model.state_dict() , offload_buffers=lowerCAmelCase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
__lowercase= torch.randn(2 , 3 )
__lowercase= model(lowerCAmelCase )
self.assertEqual(output.device , lowerCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCAmelCase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
| 295 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class _UpperCamelCase ( A , A ):
'''simple docstring'''
lowerCAmelCase__ = """resnet"""
lowerCAmelCase__ = ["""basic""", """bottleneck"""]
def __init__( self : Any , _lowerCAmelCase : List[str]=3 , _lowerCAmelCase : Optional[int]=6_4 , _lowerCAmelCase : str=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , _lowerCAmelCase : Any=[3, 4, 6, 3] , _lowerCAmelCase : List[Any]="bottleneck" , _lowerCAmelCase : List[str]="relu" , _lowerCAmelCase : int=False , _lowerCAmelCase : int=None , _lowerCAmelCase : Any=None , **_lowerCAmelCase : Any , ):
'''simple docstring'''
super().__init__(**_lowerCAmelCase)
if layer_type not in self.layer_types:
raise ValueError(f"""layer_type={layer_type} is not one of {','.join(self.layer_types)}""")
__lowercase =num_channels
__lowercase =embedding_size
__lowercase =hidden_sizes
__lowercase =depths
__lowercase =layer_type
__lowercase =hidden_act
__lowercase =downsample_in_first_stage
__lowercase =['stem'] + [f"""stage{idx}""" for idx in range(1 , len(_lowerCAmelCase) + 1)]
__lowercase , __lowercase =get_aligned_output_features_output_indices(
out_features=_lowerCAmelCase , out_indices=_lowerCAmelCase , stage_names=self.stage_names)
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = version.parse("""1.11""" )
@property
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
return 1e-3
| 361 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
if inductance <= 0:
raise ValueError('Inductance cannot be 0 or negative' )
elif capacitance <= 0:
raise ValueError('Capacitance cannot be 0 or negative' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48 | 0 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class A ( unittest.TestCase ):
def __init__( self, UpperCamelCase__, UpperCamelCase__=7, UpperCamelCase__=3, UpperCamelCase__=18, UpperCamelCase__=30, UpperCamelCase__=400, UpperCamelCase__=True, UpperCamelCase__=None, UpperCamelCase__=True, UpperCamelCase__=[0.5, 0.5, 0.5], UpperCamelCase__=[0.5, 0.5, 0.5], ):
"""simple docstring"""
lowerCAmelCase_ = size if size is not None else {'''height''': 18, '''width''': 18}
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = image_size
lowerCAmelCase_ = min_resolution
lowerCAmelCase_ = max_resolution
lowerCAmelCase_ = do_resize
lowerCAmelCase_ = size
lowerCAmelCase_ = do_normalize
lowerCAmelCase_ = image_mean
lowerCAmelCase_ = image_std
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
__snake_case = DPTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = DPTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase, '''image_mean''' ) )
self.assertTrue(hasattr(__UpperCAmelCase, '''image_std''' ) )
self.assertTrue(hasattr(__UpperCAmelCase, '''do_normalize''' ) )
self.assertTrue(hasattr(__UpperCAmelCase, '''do_resize''' ) )
self.assertTrue(hasattr(__UpperCAmelCase, '''size''' ) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''height''': 18, '''width''': 18} )
lowerCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict, size=42 )
self.assertEqual(image_processor.size, {'''height''': 42, '''width''': 42} )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase, Image.Image )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
# Test batched
lowerCAmelCase_ = image_processing(__UpperCAmelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__UpperCAmelCase, numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase, np.ndarray )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
# Test batched
lowerCAmelCase_ = image_processing(__UpperCAmelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__UpperCAmelCase, torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase, torch.Tensor )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
# Test batched
lowerCAmelCase_ = image_processing(__UpperCAmelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
| 278 |
"""simple docstring"""
import math
import os
import sys
def UpperCamelCase ( UpperCAmelCase ) ->str:
"""simple docstring"""
a_ = ""
try:
with open(UpperCAmelCase , "rb" ) as binary_file:
a_ = binary_file.read()
for dat in data:
a_ = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->None:
"""simple docstring"""
lexicon.pop(UpperCAmelCase )
a_ = last_match_id
if math.loga(UpperCAmelCase ).is_integer():
for curr_key in lexicon:
a_ = "0" + lexicon[curr_key]
a_ = bin(UpperCAmelCase )[2:]
def UpperCamelCase ( UpperCAmelCase ) ->str:
"""simple docstring"""
a_ = {"0": "0", "1": "1"}
a_ , a_ = "", ""
a_ = len(UpperCAmelCase )
for i in range(len(UpperCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
a_ = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
index += 1
a_ = ""
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
a_ = lexicon[curr_string]
result += last_match_id
return result
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->str:
"""simple docstring"""
a_ = os.path.getsize(UpperCAmelCase )
a_ = bin(UpperCAmelCase )[2:]
a_ = len(UpperCAmelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->None:
"""simple docstring"""
a_ = 8
try:
with open(UpperCAmelCase , "wb" ) as opened_file:
a_ = [
to_write[i : i + byte_length]
for i in range(0 , len(UpperCAmelCase ) , UpperCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(UpperCAmelCase , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->None:
"""simple docstring"""
a_ = read_file_binary(UpperCAmelCase )
a_ = compress_data(UpperCAmelCase )
a_ = add_file_length(UpperCAmelCase , UpperCAmelCase )
write_file_binary(UpperCAmelCase , UpperCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2]) | 243 | 0 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
__UpperCAmelCase : Optional[int] = logging.getLogger(__name__)
__UpperCAmelCase : List[Any] = {"facebook/bart-base": BartForConditionalGeneration}
__UpperCAmelCase : int = {"facebook/bart-base": BartTokenizer}
def A__ ( ) -> Optional[Any]:
__snake_case: Tuple = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""")
parser.add_argument(
"""--validation_file""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , help="""A csv or a json file containing the validation data.""")
parser.add_argument(
"""--max_length""" , type=SCREAMING_SNAKE_CASE__ , default=5 , help="""The maximum total input sequence length after tokenization.""" , )
parser.add_argument(
"""--num_beams""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) , )
parser.add_argument(
"""--model_name_or_path""" , type=SCREAMING_SNAKE_CASE__ , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=SCREAMING_SNAKE_CASE__ , )
parser.add_argument(
"""--config_name""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , help="""Pretrained config name or path if not the same as model_name""" , )
parser.add_argument(
"""--device""" , type=SCREAMING_SNAKE_CASE__ , default="""cpu""" , help="""Device where the model will be run""" , )
parser.add_argument("""--output_file_path""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , help="""Where to store the final ONNX file.""")
__snake_case: List[Any] = parser.parse_args()
return args
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="cpu") -> Dict:
__snake_case: List[str] = model_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE__).to(SCREAMING_SNAKE_CASE__)
__snake_case: Optional[Any] = tokenizer_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE__)
if model_name in ["facebook/bart-base"]:
__snake_case: Any = 0
__snake_case: Any = None
__snake_case: str = 0
return huggingface_model, tokenizer
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> Optional[int]:
model.eval()
__snake_case: Tuple = None
__snake_case: List[str] = torch.jit.script(BARTBeamSearchGenerator(SCREAMING_SNAKE_CASE__))
with torch.no_grad():
__snake_case: Tuple = """My friends are cool but they eat too many carbs."""
__snake_case: Union[str, Any] = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors="""pt""").to(model.device)
__snake_case: Tuple = model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , num_beams=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , early_stopping=SCREAMING_SNAKE_CASE__ , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
SCREAMING_SNAKE_CASE__ , (
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , SCREAMING_SNAKE_CASE__ , opset_version=14 , input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] , output_names=["""output_ids"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} , example_outputs=SCREAMING_SNAKE_CASE__ , )
logger.info("""Model exported to {}""".format(SCREAMING_SNAKE_CASE__))
__snake_case: Any = remove_dup_initializers(os.path.abspath(SCREAMING_SNAKE_CASE__))
logger.info("""Deduplicated and optimized model written to {}""".format(SCREAMING_SNAKE_CASE__))
__snake_case: int = onnxruntime.InferenceSession(SCREAMING_SNAKE_CASE__)
__snake_case: Dict = ort_sess.run(
SCREAMING_SNAKE_CASE__ , {
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(SCREAMING_SNAKE_CASE__),
"""max_length""": np.array(SCREAMING_SNAKE_CASE__),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3)
logger.info("""Model outputs from torch and ONNX Runtime are similar.""")
logger.info("""Success.""")
def A__ ( ) -> Dict:
__snake_case: Dict = parse_args()
__snake_case: Any = 5
__snake_case: List[Any] = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.setLevel(logging.INFO)
transformers.utils.logging.set_verbosity_error()
__snake_case: List[str] = torch.device(args.device)
__snake_case: Tuple = load_model_tokenizer(args.model_name_or_path , SCREAMING_SNAKE_CASE__)
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""")
model.to(SCREAMING_SNAKE_CASE__)
if args.max_length:
__snake_case: Any = args.max_length
if args.num_beams:
__snake_case: List[str] = args.num_beams
if args.output_file_path:
__snake_case: Union[str, Any] = args.output_file_path
else:
__snake_case: Tuple = """BART.onnx"""
logger.info("""Exporting model to ONNX""")
export_and_validate_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
if __name__ == "__main__":
main()
| 366 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase : List[str] = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[Any] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Tuple = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[int] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 293 | 0 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCamelCase__ ( unittest.TestCase ):
def __init__(self : Dict , snake_case_ : str , snake_case_ : Union[str, Any]=1_3 , snake_case_ : Optional[int]=7 , snake_case_ : Tuple=True , snake_case_ : int=True , snake_case_ : Dict=True , snake_case_ : int=True , snake_case_ : int=9_9 , snake_case_ : Optional[Any]=3_2 , snake_case_ : Optional[int]=5 , snake_case_ : Optional[int]=4 , snake_case_ : Optional[int]=3_7 , snake_case_ : Tuple="gelu" , snake_case_ : Dict=0.1 , snake_case_ : Dict=0.1 , snake_case_ : int=5_1_2 , snake_case_ : int=1_6 , snake_case_ : int=2 , snake_case_ : str=0.02 , snake_case_ : Optional[int]=4 , ):
__a : Any = parent
__a : str = batch_size
__a : List[str] = seq_length
__a : Dict = is_training
__a : int = use_attention_mask
__a : Tuple = use_token_type_ids
__a : int = use_labels
__a : List[str] = vocab_size
__a : Tuple = hidden_size
__a : int = num_hidden_layers
__a : Dict = num_attention_heads
__a : List[str] = intermediate_size
__a : str = hidden_act
__a : int = hidden_dropout_prob
__a : List[str] = attention_probs_dropout_prob
__a : List[str] = max_position_embeddings
__a : Union[str, Any] = type_vocab_size
__a : Any = type_sequence_label_size
__a : Dict = initializer_range
__a : str = num_choices
def lowerCAmelCase (self : Optional[int] ):
__a : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : Optional[Any] = None
if self.use_attention_mask:
__a : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
__a : Dict = None
if self.use_token_type_ids:
__a : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a : Optional[Any] = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase (self : str ):
__a : List[Any] = self.prepare_config_and_inputs()
__a , __a , __a , __a : Union[str, Any] = config_and_inputs
__a : List[str] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class UpperCamelCase__ ( __lowercase ,unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Dict = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase (self : Union[str, Any] ):
__a : Union[str, Any] = FlaxAlbertModelTester(self )
@slow
def lowerCAmelCase (self : Optional[Any] ):
for model_class_name in self.all_model_classes:
__a : Tuple = model_class_name.from_pretrained('''albert-base-v2''' )
__a : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(snake_case_ )
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
@slow
def lowerCAmelCase (self : Optional[int] ):
__a : Dict = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
__a : Tuple = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
__a : Any = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__a : List[str] = model(snake_case_ , attention_mask=snake_case_ )[0]
__a : Any = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , snake_case_ )
__a : str = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , snake_case_ , atol=1E-4 ) )
| 216 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
def lowerCAmelCase (self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowerCAmelCase (self : int ):
__a , __a : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , )
__a : Any = '''A painting of a squirrel eating a burger'''
__a : Dict = jax.device_count()
__a : Optional[int] = num_samples * [prompt]
__a : Optional[Any] = sd_pipe.prepare_inputs(snake_case_ )
__a : Optional[Any] = replicate(snake_case_ )
__a : Optional[int] = shard(snake_case_ )
__a : int = jax.random.PRNGKey(0 )
__a : str = jax.random.split(snake_case_ , jax.device_count() )
__a : int = sd_pipe(snake_case_ , snake_case_ , snake_case_ , num_inference_steps=2_5 , jit=snake_case_ )[0]
assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)
__a : Optional[int] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__a : List[Any] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
__a : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__a : str = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.4_5508, 0.4512] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCAmelCase (self : Tuple ):
__a : Optional[Any] = '''stabilityai/stable-diffusion-2'''
__a , __a : Dict = FlaxDPMSolverMultistepScheduler.from_pretrained(snake_case_ , subfolder='''scheduler''' )
__a , __a : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
snake_case_ , scheduler=snake_case_ , revision='''bf16''' , dtype=jnp.bfloataa , )
__a : Union[str, Any] = scheduler_params
__a : List[Any] = '''A painting of a squirrel eating a burger'''
__a : Any = jax.device_count()
__a : Any = num_samples * [prompt]
__a : List[Any] = sd_pipe.prepare_inputs(snake_case_ )
__a : Tuple = replicate(snake_case_ )
__a : Dict = shard(snake_case_ )
__a : Dict = jax.random.PRNGKey(0 )
__a : Dict = jax.random.split(snake_case_ , jax.device_count() )
__a : str = sd_pipe(snake_case_ , snake_case_ , snake_case_ , num_inference_steps=2_5 , jit=snake_case_ )[0]
assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)
__a : List[Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__a : Any = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
__a : List[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__a : List[Any] = jnp.array([0.4336, 0.4_2969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 216 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = '''facebook/bart-large-mnli'''
A__ = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
A__ = '''text_classifier'''
A__ = AutoTokenizer
A__ = AutoModelForSequenceClassification
A__ = ['''text''', ['''text''']]
A__ = ['''text''']
def lowerCamelCase__ (self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
super().setup()
lowercase__ = self.model.config
lowercase__ = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("""entail""" ):
lowercase__ = int(_UpperCAmelCase )
if self.entailment_id == -1:
raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""" )
def lowerCamelCase__ (self : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = labels
return self.pre_processor(
[text] * len(_UpperCAmelCase ) , [f'''This example is {label}''' for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ = outputs.logits
lowercase__ = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 371 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = OpenAIGPTTokenizer
A__ = OpenAIGPTTokenizerFast
A__ = True
A__ = False
def lowerCamelCase__ (self : List[str] ) -> int:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
lowercase__ = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
lowercase__ = ["""#version: 0.2""", """l o""", """lo w""", """e r</w>""", """"""]
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(_UpperCAmelCase ) )
def lowerCamelCase__ (self : Dict , _UpperCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
return "lower newer", "lower newer"
def lowerCamelCase__ (self : Dict ) -> Tuple:
"""simple docstring"""
lowercase__ = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
lowercase__ = """lower"""
lowercase__ = ["""low""", """er</w>"""]
lowercase__ = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__ = tokens + ["""<unk>"""]
lowercase__ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : Union[str, Any]=15 ) -> Optional[int]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase__ = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
# Simple input
lowercase__ = """This is a simple input"""
lowercase__ = ["""This is a simple input 1""", """This is a simple input 2"""]
lowercase__ = ("""This is a simple input""", """This is a pair""")
lowercase__ = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" )
# Simple input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" )
# Simple input
self.assertRaises(
_UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" , )
# Pair input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" )
# Pair input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" )
# Pair input
self.assertRaises(
_UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" , )
def lowerCamelCase__ (self : Tuple ) -> Any:
"""simple docstring"""
pass
@require_ftfy
@require_spacy
@require_tokenizers
class A ( UpperCAmelCase__ ):
'''simple docstring'''
pass
| 146 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : str = 'ZinengTang/tvlt-base'
_a : Optional[Any] = tempfile.mkdtemp()
def __lowercase ( self : List[str] ,**_a : int ):
'''simple docstring'''
return TvltImageProcessor.from_pretrained(self.checkpoint ,**_a )
def __lowercase ( self : List[str] ,**_a : Optional[Any] ):
'''simple docstring'''
return TvltFeatureExtractor.from_pretrained(self.checkpoint ,**_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : int = self.get_image_processor()
_a : Optional[Any] = self.get_feature_extractor()
_a : Any = TvltProcessor(image_processor=_a ,feature_extractor=_a )
processor.save_pretrained(self.tmpdirname )
_a : Any = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor ,_a )
self.assertIsInstance(processor.image_processor ,_a )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : str = self.get_image_processor()
_a : Any = self.get_feature_extractor()
_a : List[str] = TvltProcessor(image_processor=_a ,feature_extractor=_a )
_a : Tuple = np.ones([1_2000] )
_a : Optional[Any] = feature_extractor(_a ,return_tensors='np' )
_a : str = processor(audio=_a ,return_tensors='np' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def __lowercase ( self : str ):
'''simple docstring'''
_a : Tuple = self.get_image_processor()
_a : Tuple = self.get_feature_extractor()
_a : List[str] = TvltProcessor(image_processor=_a ,feature_extractor=_a )
_a : Dict = np.ones([3, 224, 224] )
_a : List[str] = image_processor(_a ,return_tensors='np' )
_a : Tuple = processor(images=_a ,return_tensors='np' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : int = self.get_image_processor()
_a : List[str] = self.get_feature_extractor()
_a : Optional[Any] = TvltProcessor(image_processor=_a ,feature_extractor=_a )
_a : int = np.ones([1_2000] )
_a : int = np.ones([3, 224, 224] )
_a : str = processor(audio=_a ,images=_a )
self.assertListEqual(list(inputs.keys() ) ,['audio_values', 'audio_mask', 'pixel_values', 'pixel_mask'] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : int = self.get_image_processor()
_a : Tuple = self.get_feature_extractor()
_a : str = TvltProcessor(image_processor=_a ,feature_extractor=_a )
self.assertListEqual(
processor.model_input_names ,image_processor.model_input_names + feature_extractor.model_input_names ,msg='`processor` and `image_processor`+`feature_extractor` model input names do not match' ,)
| 271 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__lowerCAmelCase = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : str ,_a : Path ,_a : Union[str, None] = None ,_a : Union[List[str], None] = None ,_a : Union[str, List[str], None] = None ,_a : bool = True ,):
'''simple docstring'''
_a : Optional[int] = [file for file in os.listdir(_a ) if os.path.isfile(os.path.join(_a ,_a ) )]
if identifier is not None:
_a : List[str] = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(_a ,_a ):
for n_ in n_identifier:
_a : Tuple = [file for file in files if n_ not in file]
else:
_a : Optional[Any] = [file for file in files if n_identifier not in file]
_a : List[str] = ignore_files or []
ignore_files.append('__init__.py' )
_a : Tuple = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' ,_a )
if only_modules:
_a : Any = file.split('.' )[0]
try:
_a : List[str] = getattr(_a ,_a )
_a : int = doctest.DocTestSuite(_a )
_a : Any = unittest.TextTestRunner().run(_a )
self.assertIs(len(result.failures ) ,0 )
except AttributeError:
logger.info(F"""{module_identifier} is not a module.""" )
else:
_a : Union[str, Any] = doctest.testfile(str('..' / directory / file ) ,optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed ,0 )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : int = Path('src/transformers' )
_a : List[Any] = 'modeling'
_a : Optional[Any] = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(_a ,identifier=_a ,ignore_files=_a )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Optional[Any] = Path('src/transformers' )
_a : Optional[Any] = 'tokenization'
self.analyze_directory(_a ,identifier=_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Dict = Path('src/transformers' )
_a : str = 'configuration'
self.analyze_directory(_a ,identifier=_a )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Tuple = Path('src/transformers' )
_a : List[Any] = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(_a ,n_identifier=_a )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : List[Any] = Path('docs/source' )
_a : List[str] = ['favicon.ico']
self.analyze_directory(_a ,ignore_files=_a ,only_modules=_a )
| 271 | 1 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> bool:
snake_case : List[Any] = str(lowercase )
return len(lowercase ) == 9 and set(lowercase ) == set("""123456789""" )
def SCREAMING_SNAKE_CASE__ ( ) -> int | None:
for base_num in range(9999 ,4999 ,-1 ):
snake_case : List[str] = 100002 * base_num
if is_9_pandigital(lowercase ):
return candidate
for base_num in range(333 ,99 ,-1 ):
snake_case : List[Any] = 1002003 * base_num
if is_9_pandigital(lowercase ):
return candidate
return None
if __name__ == "__main__":
print(f"""{solution() = }""")
| 176 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Union[str, Any]:
if isinstance(lowercase ,torch.Tensor ):
return image
elif isinstance(lowercase ,PIL.Image.Image ):
snake_case : str = [image]
if isinstance(image[0] ,PIL.Image.Image ):
snake_case : List[Any] = [np.array(i.resize((w, h) ,resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image]
snake_case : Optional[int] = np.concatenate(lowercase ,axis=0 )
snake_case : str = np.array(lowercase ).astype(np.floataa ) / 255.0
snake_case : List[str] = image.transpose(0 ,3 ,1 ,2 )
snake_case : Any = 2.0 * image - 1.0
snake_case : Optional[Any] = torch.from_numpy(lowercase )
elif isinstance(image[0] ,torch.Tensor ):
snake_case : Optional[int] = torch.cat(lowercase ,dim=0 )
return image
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase=0.9995 ) -> Optional[int]:
if not isinstance(lowercase ,np.ndarray ):
snake_case : Any = True
snake_case : str = va.device
snake_case : Optional[Any] = va.cpu().numpy()
snake_case : str = va.cpu().numpy()
snake_case : Tuple = np.sum(va * va / (np.linalg.norm(lowercase ) * np.linalg.norm(lowercase )) )
if np.abs(lowercase ) > DOT_THRESHOLD:
snake_case : Optional[int] = (1 - t) * va + t * va
else:
snake_case : List[Any] = np.arccos(lowercase )
snake_case : str = np.sin(lowercase )
snake_case : int = theta_a * t
snake_case : Dict = np.sin(lowercase )
snake_case : Optional[Any] = np.sin(theta_a - theta_t ) / sin_theta_a
snake_case : Union[str, Any] = sin_theta_t / sin_theta_a
snake_case : Union[str, Any] = sa * va + sa * va
if inputs_are_torch:
snake_case : List[Any] = torch.from_numpy(lowercase ).to(lowercase )
return va
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Tuple:
snake_case : Dict = F.normalize(lowercase ,dim=-1 )
snake_case : Optional[Any] = F.normalize(lowercase ,dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> str:
for param in model.parameters():
snake_case : Tuple = value
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , A , A , A , A , A , A , A , A=None , A=None , A=None , ) -> List[Any]:
super().__init__()
self.register_modules(
vae=A , text_encoder=A , clip_model=A , tokenizer=A , unet=A , scheduler=A , feature_extractor=A , coca_model=A , coca_tokenizer=A , coca_transform=A , )
snake_case : Optional[int] = (
feature_extractor.size
if isinstance(feature_extractor.size , A )
else feature_extractor.size["""shortest_edge"""]
)
snake_case : Union[str, Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , A )
set_requires_grad(self.clip_model , A )
def UpperCAmelCase ( self , A = "auto" ) -> Tuple:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
snake_case : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def UpperCAmelCase ( self ) -> Optional[int]:
self.enable_attention_slicing(A )
def UpperCAmelCase ( self ) -> Any:
set_requires_grad(self.vae , A )
def UpperCAmelCase ( self ) -> List[Any]:
set_requires_grad(self.vae , A )
def UpperCAmelCase ( self ) -> Union[str, Any]:
set_requires_grad(self.unet , A )
def UpperCAmelCase ( self ) -> Tuple:
set_requires_grad(self.unet , A )
def UpperCAmelCase ( self , A , A , A ) -> Dict:
# get the original timestep using init_timestep
snake_case : Tuple = min(int(num_inference_steps * strength ) , A )
snake_case : List[str] = max(num_inference_steps - init_timestep , 0 )
snake_case : List[str] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCAmelCase ( self , A , A , A , A , A , A=None ) -> List[str]:
if not isinstance(A , torch.Tensor ):
raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(A )}""" )
snake_case : str = image.to(device=A , dtype=A )
if isinstance(A , A ):
snake_case : int = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(A )
]
snake_case : str = torch.cat(A , dim=0 )
else:
snake_case : List[Any] = self.vae.encode(A ).latent_dist.sample(A )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
snake_case : Dict = 0.1_82_15 * init_latents
snake_case : Tuple = init_latents.repeat_interleave(A , dim=0 )
snake_case : Optional[int] = randn_tensor(init_latents.shape , generator=A , device=A , dtype=A )
# get latents
snake_case : Union[str, Any] = self.scheduler.add_noise(A , A , A )
snake_case : List[Any] = init_latents
return latents
def UpperCAmelCase ( self , A ) -> int:
snake_case : Optional[Any] = self.coca_transform(A ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
snake_case : Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
snake_case : int = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" , """""" ).rstrip(""" .,""" )
def UpperCAmelCase ( self , A , A ) -> List[Any]:
snake_case : Tuple = self.feature_extractor.preprocess(A )
snake_case : List[Any] = torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half()
snake_case : Optional[int] = self.clip_model.get_image_features(A )
snake_case : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=A )
snake_case : Tuple = image_embeddings_clip.repeat_interleave(A , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def UpperCAmelCase ( self , A , A , A , A , A , A , A , ) -> Any:
snake_case : Dict = latents.detach().requires_grad_()
snake_case : str = self.scheduler.scale_model_input(A , A )
# predict the noise residual
snake_case : str = self.unet(A , A , encoder_hidden_states=A ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
snake_case : int = self.scheduler.alphas_cumprod[timestep]
snake_case : Tuple = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
snake_case : Any = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
snake_case : str = torch.sqrt(A )
snake_case : str = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , A ):
snake_case : int = self.scheduler.sigmas[index]
snake_case : List[Any] = latents - sigma * noise_pred
else:
raise ValueError(f"""scheduler type {type(self.scheduler )} not supported""" )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
snake_case : List[str] = 1 / 0.1_82_15 * sample
snake_case : str = self.vae.decode(A ).sample
snake_case : Any = (image / 2 + 0.5).clamp(0 , 1 )
snake_case : str = transforms.Resize(self.feature_extractor_size )(A )
snake_case : Dict = self.normalize(A ).to(latents.dtype )
snake_case : Union[str, Any] = self.clip_model.get_image_features(A )
snake_case : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=A )
snake_case : Optional[int] = spherical_dist_loss(A , A ).mean() * clip_guidance_scale
snake_case : int = -torch.autograd.grad(A , A )[0]
if isinstance(self.scheduler , A ):
snake_case : Union[str, Any] = latents.detach() + grads * (sigma**2)
snake_case : Union[str, Any] = noise_pred_original
else:
snake_case : List[str] = noise_pred_original - torch.sqrt(A ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , A , A , A = None , A = None , A = 5_1_2 , A = 5_1_2 , A = 0.6 , A = 5_0 , A = 7.5 , A = 1 , A = 0.0 , A = 1_0_0 , A = None , A = "pil" , A = True , A = 0.8 , A = 0.1 , A = 0.1 , ) -> Union[str, Any]:
if isinstance(A , A ) and len(A ) != batch_size:
raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(A )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(A , torch.Generator ) and batch_size > 1:
snake_case : Dict = [generator] + [None] * (batch_size - 1)
snake_case : Tuple = [
("""model""", self.coca_model is None),
("""tokenizer""", self.coca_tokenizer is None),
("""transform""", self.coca_transform is None),
]
snake_case : List[str] = [x[0] for x in coca_is_none if x[1]]
snake_case : Optional[int] = """, """.join(A )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(A ):
raise ValueError(
f"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
snake_case : Tuple = self.get_image_description(A )
if style_prompt is None:
if len(A ):
raise ValueError(
f"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
snake_case : List[Any] = self.get_image_description(A )
# get prompt text embeddings for content and style
snake_case : Dict = self.tokenizer(
A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors="""pt""" , )
snake_case : str = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
snake_case : Dict = self.tokenizer(
A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors="""pt""" , )
snake_case : Tuple = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
snake_case : List[str] = slerp(A , A , A )
# duplicate text embeddings for each generation per prompt
snake_case : List[Any] = text_embeddings.repeat_interleave(A , dim=0 )
# set timesteps
snake_case : Union[str, Any] = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
snake_case : Optional[Any] = {}
if accepts_offset:
snake_case : Dict = 1
self.scheduler.set_timesteps(A , **A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
snake_case , snake_case : List[Any] = self.get_timesteps(A , A , self.device )
snake_case : List[str] = timesteps[:1].repeat(A )
# Preprocess image
snake_case : Dict = preprocess(A , A , A )
snake_case : List[Any] = self.prepare_latents(
A , A , A , text_embeddings.dtype , self.device , A )
snake_case : Optional[int] = preprocess(A , A , A )
snake_case : Optional[Any] = self.prepare_latents(
A , A , A , text_embeddings.dtype , self.device , A )
snake_case : str = slerp(A , A , A )
if clip_guidance_scale > 0:
snake_case : List[Any] = self.get_clip_image_embeddings(A , A )
snake_case : Any = self.get_clip_image_embeddings(A , A )
snake_case : Tuple = slerp(
A , A , A )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
snake_case : Optional[Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
snake_case : List[str] = content_text_input.input_ids.shape[-1]
snake_case : Any = self.tokenizer([""""""] , padding="""max_length""" , max_length=A , return_tensors="""pt""" )
snake_case : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
snake_case : Tuple = uncond_embeddings.repeat_interleave(A , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case : str = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
snake_case : Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
snake_case : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
snake_case : List[Any] = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to(
self.device )
else:
snake_case : Optional[int] = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
snake_case : Union[str, Any] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
snake_case : Dict = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
snake_case : Tuple = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
snake_case : Dict = {}
if accepts_eta:
snake_case : Union[str, Any] = eta
# check if the scheduler accepts generator
snake_case : List[Any] = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
snake_case : List[str] = generator
with self.progress_bar(total=A ):
for i, t in enumerate(A ):
# expand the latents if we are doing classifier free guidance
snake_case : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case : List[str] = self.scheduler.scale_model_input(A , A )
# predict the noise residual
snake_case : Any = self.unet(A , A , encoder_hidden_states=A ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
snake_case , snake_case : int = noise_pred.chunk(2 )
snake_case : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
snake_case : Any = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
snake_case , snake_case : List[Any] = self.cond_fn(
A , A , A , A , A , A , A , )
# compute the previous noisy sample x_t -> x_t-1
snake_case : Tuple = self.scheduler.step(A , A , A , **A ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
snake_case : str = 1 / 0.1_82_15 * latents
snake_case : Optional[Any] = self.vae.decode(A ).sample
snake_case : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
snake_case : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case : Tuple = self.numpy_to_pil(A )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
| 176 | 1 |
"""simple docstring"""
import requests
_a = '' # <-- Put your OpenWeatherMap appid here!
_a = 'https://api.openweathermap.org/data/2.5/'
def __a ( __lowerCamelCase = "Chicago", __lowerCamelCase = APPID ):
return requests.get(URL_BASE + "weather", params=locals() ).json()
def __a ( __lowerCamelCase = "Kolkata, India", __lowerCamelCase = APPID ):
return requests.get(URL_BASE + "forecast", params=locals() ).json()
def __a ( __lowerCamelCase = 55.68, __lowerCamelCase = 12.57, __lowerCamelCase = APPID ):
return requests.get(URL_BASE + "onecall", params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
_a = input('Enter a location:').strip()
if location:
pprint(current_weather(location))
else:
break
| 61 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __a ( ):
UpperCAmelCase_ : List[Any] = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
UpperCAmelCase_ : Optional[int] = Dataset.from_dict(__lowerCamelCase )
return dataset
class A_ (lowercase__ ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = get_dataset()
UpperCAmelCase_ : Any = make_duplicate_clusters(lowercase_ , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = get_dataset()
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = deduplicate_dataset(lowercase_ )
self.assertEqual(len(lowercase_ ) , 2 )
print(lowercase_ )
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , lowercase_ )
| 61 | 1 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class A__ :
"""simple docstring"""
@staticmethod
def a_ ( *__snake_case , **__snake_case ):
pass
@is_pipeline_test
@require_vision
@require_torch
class A__ ( unittest.TestCase ):
"""simple docstring"""
__magic_name__ = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def a_ ( self , __snake_case , __snake_case , __snake_case ):
snake_case = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
snake_case = [
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
]
return object_detector, examples
def a_ ( self , __snake_case , __snake_case ):
snake_case = object_detector(examples[0] , threshold=0.0 )
snake_case = len(__snake_case )
self.assertGreater(__snake_case , 0 )
self.assertEqual(
__snake_case , [
{
'''score''': ANY(__snake_case ),
'''label''': ANY(__snake_case ),
'''box''': {'''xmin''': ANY(__snake_case ), '''ymin''': ANY(__snake_case ), '''xmax''': ANY(__snake_case ), '''ymax''': ANY(__snake_case )},
}
for i in range(__snake_case )
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def a_ ( self ):
pass
@require_torch
def a_ ( self ):
snake_case = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
snake_case = object_detector(
'''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
{'''score''': 0.7235, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.7218, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.7184, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.6748, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6656, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6614, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6456, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.6419, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
] , )
snake_case = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
[
{'''score''': 0.7235, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.7218, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.7184, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.6748, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6656, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6614, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6456, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.6419, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
]
] , )
@require_torch
@slow
def a_ ( self ):
snake_case = pipeline('''zero-shot-object-detection''' )
snake_case = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
] , )
snake_case = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
[
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
[
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def a_ ( self ):
pass
@require_torch
@slow
def a_ ( self ):
snake_case = 0.2
snake_case = pipeline('''zero-shot-object-detection''' )
snake_case = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=__snake_case , )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
] , )
@require_torch
@slow
def a_ ( self ):
snake_case = 2
snake_case = pipeline('''zero-shot-object-detection''' )
snake_case = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=__snake_case , )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
] , )
| 213 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE : int = {
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 213 | 1 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Any:
'''simple docstring'''
lowerCAmelCase : Optional[int] = checkpoint
lowerCAmelCase : Optional[int] = {}
lowerCAmelCase : List[str] = vae_state_dict["encoder.conv_in.weight"]
lowerCAmelCase : int = vae_state_dict["encoder.conv_in.bias"]
lowerCAmelCase : Dict = vae_state_dict["encoder.conv_out.weight"]
lowerCAmelCase : Any = vae_state_dict["encoder.conv_out.bias"]
lowerCAmelCase : Optional[Any] = vae_state_dict["encoder.norm_out.weight"]
lowerCAmelCase : int = vae_state_dict["encoder.norm_out.bias"]
lowerCAmelCase : Optional[Any] = vae_state_dict["decoder.conv_in.weight"]
lowerCAmelCase : List[str] = vae_state_dict["decoder.conv_in.bias"]
lowerCAmelCase : str = vae_state_dict["decoder.conv_out.weight"]
lowerCAmelCase : str = vae_state_dict["decoder.conv_out.bias"]
lowerCAmelCase : int = vae_state_dict["decoder.norm_out.weight"]
lowerCAmelCase : List[str] = vae_state_dict["decoder.norm_out.bias"]
lowerCAmelCase : int = vae_state_dict["quant_conv.weight"]
lowerCAmelCase : int = vae_state_dict["quant_conv.bias"]
lowerCAmelCase : List[str] = vae_state_dict["post_quant_conv.weight"]
lowerCAmelCase : Tuple = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
lowerCAmelCase : str = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'encoder.down' in layer} )
lowerCAmelCase : Optional[Any] = {
layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(_UpperCAmelCase )
}
# Retrieves the keys for the decoder up blocks only
lowerCAmelCase : Union[str, Any] = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'decoder.up' in layer} )
lowerCAmelCase : Dict = {
layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(_UpperCAmelCase )
}
for i in range(_UpperCAmelCase ):
lowerCAmelCase : int = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
lowerCAmelCase : Optional[Any] = vae_state_dict.pop(
f"encoder.down.{i}.downsample.conv.weight" )
lowerCAmelCase : Dict = vae_state_dict.pop(
f"encoder.down.{i}.downsample.conv.bias" )
lowerCAmelCase : Optional[Any] = renew_vae_resnet_paths(_UpperCAmelCase )
lowerCAmelCase : str = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
assign_to_checkpoint(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, additional_replacements=[meta_path], config=_UpperCAmelCase )
lowerCAmelCase : int = [key for key in vae_state_dict if "encoder.mid.block" in key]
lowerCAmelCase : str = 2
for i in range(1, num_mid_res_blocks + 1 ):
lowerCAmelCase : Dict = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
lowerCAmelCase : Dict = renew_vae_resnet_paths(_UpperCAmelCase )
lowerCAmelCase : Dict = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, additional_replacements=[meta_path], config=_UpperCAmelCase )
lowerCAmelCase : List[str] = [key for key in vae_state_dict if "encoder.mid.attn" in key]
lowerCAmelCase : Union[str, Any] = renew_vae_attention_paths(_UpperCAmelCase )
lowerCAmelCase : Union[str, Any] = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, additional_replacements=[meta_path], config=_UpperCAmelCase )
conv_attn_to_linear(_UpperCAmelCase )
for i in range(_UpperCAmelCase ):
lowerCAmelCase : Union[str, Any] = num_up_blocks - 1 - i
lowerCAmelCase : List[Any] = [
key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
]
if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
lowerCAmelCase : Optional[Any] = vae_state_dict[
f"decoder.up.{block_id}.upsample.conv.weight"
]
lowerCAmelCase : Any = vae_state_dict[
f"decoder.up.{block_id}.upsample.conv.bias"
]
lowerCAmelCase : int = renew_vae_resnet_paths(_UpperCAmelCase )
lowerCAmelCase : Union[str, Any] = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
assign_to_checkpoint(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, additional_replacements=[meta_path], config=_UpperCAmelCase )
lowerCAmelCase : Any = [key for key in vae_state_dict if "decoder.mid.block" in key]
lowerCAmelCase : Union[str, Any] = 2
for i in range(1, num_mid_res_blocks + 1 ):
lowerCAmelCase : Tuple = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
lowerCAmelCase : Tuple = renew_vae_resnet_paths(_UpperCAmelCase )
lowerCAmelCase : List[str] = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, additional_replacements=[meta_path], config=_UpperCAmelCase )
lowerCAmelCase : str = [key for key in vae_state_dict if "decoder.mid.attn" in key]
lowerCAmelCase : Tuple = renew_vae_attention_paths(_UpperCAmelCase )
lowerCAmelCase : Optional[Any] = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, additional_replacements=[meta_path], config=_UpperCAmelCase )
conv_attn_to_linear(_UpperCAmelCase )
return new_checkpoint
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, ) -> Any:
'''simple docstring'''
lowerCAmelCase : List[str] = requests.get(
' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' )
lowerCAmelCase : Optional[int] = io.BytesIO(r.content )
lowerCAmelCase : Optional[Any] = OmegaConf.load(_UpperCAmelCase )
lowerCAmelCase : Optional[int] = 512
lowerCAmelCase : str = "cuda" if torch.cuda.is_available() else "cpu"
if checkpoint_path.endswith('safetensors' ):
from safetensors import safe_open
lowerCAmelCase : Dict = {}
with safe_open(_UpperCAmelCase, framework='pt', device='cpu' ) as f:
for key in f.keys():
lowerCAmelCase : int = f.get_tensor(_UpperCAmelCase )
else:
lowerCAmelCase : Union[str, Any] = torch.load(_UpperCAmelCase, map_location=_UpperCAmelCase )["state_dict"]
# Convert the VAE model.
lowerCAmelCase : str = create_vae_diffusers_config(_UpperCAmelCase, image_size=_UpperCAmelCase )
lowerCAmelCase : str = custom_convert_ldm_vae_checkpoint(_UpperCAmelCase, _UpperCAmelCase )
lowerCAmelCase : Optional[int] = AutoencoderKL(**_UpperCAmelCase )
vae.load_state_dict(_UpperCAmelCase )
vae.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
__A : Any = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
__A : List[str] = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 138 |
'''simple docstring'''
def A_ ( snake_case ):
if not all(x.isalpha() for x in string ):
raise ValueError("String must only contain alphabetic characters." )
SCREAMING_SNAKE_CASE:Optional[int] = sorted(string.lower() )
return len(snake_case ) == len(set(snake_case ) )
if __name__ == "__main__":
A_ = input("Enter a string ").strip()
A_ = is_isogram(input_str)
print(f'''{input_str} is {"an" if isogram else "not an"} isogram.''')
| 139 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class a ( SCREAMING_SNAKE_CASE__ ):
snake_case__ = 'gpt_neox_japanese'
def __init__( self , _snake_case=3_20_00 , _snake_case=25_60 , _snake_case=32 , _snake_case=32 , _snake_case=4 , _snake_case="gelu" , _snake_case=1.00 , _snake_case=1_00_00 , _snake_case=20_48 , _snake_case=0.02 , _snake_case=1E-5 , _snake_case=True , _snake_case=3_19_96 , _snake_case=3_19_99 , _snake_case=0.1 , _snake_case=0.0 , **_snake_case , ):
"""simple docstring"""
super().__init__(bos_token_id=a_ , eos_token_id=a_ , **a_ )
lowerCAmelCase = vocab_size
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_multiple_size
lowerCAmelCase = hidden_act
lowerCAmelCase = rotary_pct
lowerCAmelCase = rotary_emb_base
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = use_cache
lowerCAmelCase = attention_dropout
lowerCAmelCase = hidden_dropout
| 359 |
"""simple docstring"""
import os
from collections.abc import Iterator
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "." ):
for dir_path, dir_names, filenames in os.walk(_UpperCAmelCase ):
lowerCAmelCase = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_UpperCAmelCase )[1] in (".py", ".ipynb"):
yield os.path.join(_UpperCAmelCase , _UpperCAmelCase ).lstrip('./' )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ):
return F'{i * " "}*' if i else "\n##"
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ):
lowerCAmelCase = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_UpperCAmelCase ) or old_parts[i] != new_part) and new_part:
print(F'{md_prefix(_UpperCAmelCase )} {new_part.replace("_" , " " ).title()}' )
return new_path
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "." ):
lowerCAmelCase = ''
for filepath in sorted(good_file_paths(_UpperCAmelCase ) ):
lowerCAmelCase ,lowerCAmelCase = os.path.split(_UpperCAmelCase )
if filepath != old_path:
lowerCAmelCase = print_path(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = (filepath.count(os.sep ) + 1) if filepath else 0
lowerCAmelCase = F'{filepath}/{filename}'.replace(' ' , '%20' )
lowerCAmelCase = os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
print(F'{md_prefix(_UpperCAmelCase )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md('''.''')
| 309 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 64 |
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__ (snake_case__ : list[float] ):
"""simple docstring"""
_snake_case : int = 0.00
_snake_case : int = 0
for resistor in resistors:
if resistor <= 0:
_snake_case : Dict = F"Resistor at index {index} has a negative or zero value!"
raise ValueError(snake_case__ )
first_sum += 1 / float(snake_case__ )
index += 1
return 1 / first_sum
def UpperCAmelCase__ (snake_case__ : list[float] ):
"""simple docstring"""
_snake_case : Union[str, Any] = 0.00
_snake_case : Any = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
_snake_case : Any = F"Resistor at index {index} has a negative value!"
raise ValueError(snake_case__ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 | 1 |
"""simple docstring"""
from math import pow, sqrt
def _A (*__a ) -> bool:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = len(__a ) > 0 and all(value > 0.0 for value in values )
return result
def _A (__a , __a ) -> float | ValueError:
"""simple docstring"""
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__a , __a )
else ValueError('''Input Error: Molar mass values must greater than 0.''' )
)
def _A (__a , __a , __a ) -> float | ValueError:
"""simple docstring"""
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__a , __a , __a )
else ValueError(
'''Input Error: Molar mass and effusion rate values must greater than 0.''' )
)
def _A (__a , __a , __a ) -> float | ValueError:
"""simple docstring"""
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__a , __a , __a )
else ValueError(
'''Input Error: Molar mass and effusion rate values must greater than 0.''' )
)
def _A (__a , __a , __a ) -> float | ValueError:
"""simple docstring"""
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(__a , __a , __a )
else ValueError(
'''Input Error: Molar mass and effusion rate values must greater than 0.''' )
)
def _A (__a , __a , __a ) -> float | ValueError:
"""simple docstring"""
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(__a , __a , __a )
else ValueError(
'''Input Error: Molar mass and effusion rate values must greater than 0.''' )
)
| 318 |
"""simple docstring"""
from __future__ import annotations
UpperCAmelCase_ : List[str] = list[list[int]]
# assigning initial values to the grid
UpperCAmelCase_ : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
UpperCAmelCase_ : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def _A (__a , __a , __a , __a ) -> bool:
"""simple docstring"""
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def _A (__a ) -> tuple[int, int] | None:
"""simple docstring"""
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def _A (__a ) -> Matrix | None:
"""simple docstring"""
if location := find_empty_location(__a ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(__a , __a , __a , __a ):
SCREAMING_SNAKE_CASE_ : Tuple = digit
if sudoku(__a ) is not None:
return grid
SCREAMING_SNAKE_CASE_ : Any = 0
return None
def _A (__a ) -> None:
"""simple docstring"""
for row in grid:
for cell in row:
print(__a , end=''' ''' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
UpperCAmelCase_ : str = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 318 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase_ ( unittest.TestCase ):
def __init__( self : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any]=7 , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : Tuple=1_8 , UpperCAmelCase__ : Optional[int]=3_0 , UpperCAmelCase__ : Union[str, Any]=4_0_0 , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Optional[Any]=True , ) -> int:
lowerCAmelCase = size if size is not None else {'height': 1_8, 'width': 1_8}
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = num_channels
lowerCAmelCase = image_size
lowerCAmelCase = min_resolution
lowerCAmelCase = max_resolution
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = apply_ocr
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
lowerCamelCase : Optional[int] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __UpperCAmelCase ( self : Tuple ) -> int:
lowerCAmelCase = LayoutLMvaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : Any ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'apply_ocr' ) )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 1_8, 'width': 1_8} )
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {'height': 4_2, 'width': 4_2} )
def __UpperCAmelCase ( self : Dict ) -> str:
pass
def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
# Initialize image_processing
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , UpperCAmelCase__ )
self.assertIsInstance(encoding.boxes , UpperCAmelCase__ )
# Test batched
lowerCAmelCase = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
# Initialize image_processing
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowerCAmelCase = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
# Initialize image_processing
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowerCAmelCase = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self : str ) -> Optional[int]:
# with apply_OCR = True
lowerCAmelCase = LayoutLMvaImageProcessor()
from datasets import load_dataset
lowerCAmelCase = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
lowerCAmelCase = Image.open(ds[0]['file'] ).convert('RGB' )
lowerCAmelCase = image_processing(UpperCAmelCase__ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowerCAmelCase = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
lowerCAmelCase = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase__ )
self.assertListEqual(encoding.boxes , UpperCAmelCase__ )
# with apply_OCR = False
lowerCAmelCase = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ )
lowerCAmelCase = image_processing(UpperCAmelCase__ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
| 4 | """simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
SCREAMING_SNAKE_CASE__ = HfArgumentParser(InitializationArguments)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
SCREAMING_SNAKE_CASE__ = {
"vocab_size": len(tokenizer),
"scale_attn_by_inverse_layer_idx": True,
"reorder_and_upcast_attn": True,
}
# Load model config (GPT-2 large in this case)
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
SCREAMING_SNAKE_CASE__ = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 150 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
@property
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
torch.manual_seed(0)
a__: str = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
torch.manual_seed(0)
a__: List[Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0)
a__: Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(lowercase)
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Union[str, Any] = self.dummy_uncond_unet
a__: Optional[int] = DDIMScheduler()
a__: Optional[int] = self.dummy_vq_model
a__: Union[str, Any] = LDMPipeline(unet=lowercase , vqvae=lowercase , scheduler=lowercase)
ldm.to(lowercase)
ldm.set_progress_bar_config(disable=lowercase)
a__: str = torch.manual_seed(0)
a__: Dict = ldm(generator=lowercase , num_inference_steps=2 , output_type='numpy').images
a__: Union[str, Any] = torch.manual_seed(0)
a__: int = ldm(generator=lowercase , num_inference_steps=2 , output_type='numpy' , return_dict=lowercase)[0]
a__: Union[str, Any] = image[0, -3:, -3:, -1]
a__: int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a__: int = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
a__: Optional[Any] = 1e-2 if torch_device != 'mps' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Union[str, Any] = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256')
ldm.to(lowercase)
ldm.set_progress_bar_config(disable=lowercase)
a__: List[str] = torch.manual_seed(0)
a__: Optional[int] = ldm(generator=lowercase , num_inference_steps=5 , output_type='numpy').images
a__: Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
a__: int = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
a__: Any = 1e-2 if torch_device != 'mps' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 364 | """simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
lowercase__ = logging.get_logger(__name__)
lowercase__ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowercase__ = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
lowercase__ = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
lowercase__ = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
lowercase__ = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
lowercase__ = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
lowercase__ = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
lowercase__ = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
lowercase__ = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
lowercase__ = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class __snake_case ( __lowerCAmelCase ):
a__ = VOCAB_FILES_NAMES
a__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
a__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __snake_case ( __lowerCAmelCase ):
a__ = VOCAB_FILES_NAMES
a__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
a__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowercase__ = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
lowercase__ = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
lowercase__ = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(__lowerCAmelCase )
class __snake_case :
def __call__( self , lowercase , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , lowercase = None , lowercase = None , **lowercase , ) -> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , return_tensors=lowercase , return_attention_mask=lowercase , **lowercase , )
elif titles is None or texts is None:
a__: str = titles if texts is None else texts
return super().__call__(
lowercase , lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , return_tensors=lowercase , return_attention_mask=lowercase , **lowercase , )
a__: Tuple = titles if not isinstance(lowercase , lowercase) else [titles]
a__: Optional[int] = texts if not isinstance(lowercase , lowercase) else [texts]
a__: Dict = len(lowercase)
a__: Dict = questions if not isinstance(lowercase , lowercase) else [questions] * n_passages
if len(lowercase) != len(lowercase):
raise ValueError(
f'There should be as many titles than texts but got {len(lowercase)} titles and {len(lowercase)} texts.')
a__: List[str] = super().__call__(lowercase , lowercase , padding=lowercase , truncation=lowercase)['input_ids']
a__: List[Any] = super().__call__(lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase)['input_ids']
a__: Optional[Any] = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowercase , lowercase)
]
}
if return_attention_mask is not False:
a__: Dict = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
a__: int = attention_mask
return self.pad(lowercase , padding=lowercase , max_length=lowercase , return_tensors=lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase = 16 , lowercase = 64 , lowercase = 4 , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
a__: Dict = reader_input['input_ids']
a__ , a__ , a__: Tuple = reader_output[:3]
a__: Tuple = len(lowercase)
a__: Optional[Any] = sorted(range(lowercase) , reverse=lowercase , key=relevance_logits.__getitem__)
a__: List[DPRReaderOutput] = []
for doc_id in sorted_docs:
a__: Tuple = list(input_ids[doc_id])
# assuming question & title information is at the beginning of the sequence
a__: Dict = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
a__: Any = sequence_ids.index(self.pad_token_id)
else:
a__: Optional[Any] = len(lowercase)
a__: Optional[int] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowercase , top_spans=lowercase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowercase , start_index=lowercase , end_index=lowercase , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
if len(lowercase) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
a__: Optional[Any] = []
for start_index, start_score in enumerate(lowercase):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
a__: str = sorted(lowercase , key=lambda lowercase: x[1] , reverse=lowercase)
a__: Optional[Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f'Wrong span indices: [{start_index}:{end_index}]')
a__: str = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f'Span is too long: {length} > {max_answer_length}')
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals):
continue
chosen_span_intervals.append((start_index, end_index))
if len(lowercase) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(__lowerCAmelCase )
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase ):
a__ = VOCAB_FILES_NAMES
a__ = READER_PRETRAINED_VOCAB_FILES_MAP
a__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = READER_PRETRAINED_INIT_CONFIGURATION
a__ = ["""input_ids""", """attention_mask"""]
| 203 | 0 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> list:
A__ = int(lowercase_ )
if n_element < 1:
A__ = ValueError("a should be a positive number" )
raise my_error
A__ = [1]
A__, A__, A__ = (0, 0, 0)
A__ = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = input("Enter the last number (nth term) of the Hamming Number Series: ")
print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
SCREAMING_SNAKE_CASE = hamming(int(n))
print("-----------------------------------------------------")
print(f'The list with nth numbers is: {hamming_numbers}')
print("-----------------------------------------------------")
| 247 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
lowercase__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
lowercase__ = field(
default=A_, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowercase__ = field(
default=A_, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
lowercase__ = field(
default=A_, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
lowercase__ = field(
default=A_, metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''}, )
lowercase__ = field(
default='''main''', metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''}, )
lowercase__ = field(
default=A_, metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
}, )
@dataclass
class UpperCAmelCase_ :
lowercase__ = field(default=A_, metadata={'''help''': '''The input training data file (a text file).'''} )
lowercase__ = field(
default=A_, metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''}, )
lowercase__ = field(
default=A_, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
lowercase__ = field(
default=A_, metadata={'''help''': '''The number of processes to use for the preprocessing.'''}, )
lowercase__ = field(
default=A_, metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. If passed, sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
}, )
lowercase__ = field(
default=A_, metadata={
'''help''': (
'''Whether to pad all samples to the maximum sentence length. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
'''efficient on GPU but very bad for TPU.'''
)
}, )
lowercase__ = field(
default=A_, metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
}, )
lowercase__ = field(
default=A_, metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
}, )
def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
if self.train_file is not None:
A__ = self.train_file.split("." )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
A__ = self.validation_file.split("." )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class UpperCAmelCase_ :
lowercase__ = 42
lowercase__ = True
lowercase__ = None
lowercase__ = None
def __call__( self : Optional[Any] , snake_case_ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ = "label" if "label" in features[0].keys() else "labels"
A__ = [feature.pop(snake_case_ ) for feature in features]
A__ = len(snake_case_ )
A__ = len(features[0]["input_ids"] )
A__ = [
[{k: v[i] for k, v in feature.items()} for i in range(snake_case_ )] for feature in features
]
A__ = list(chain(*snake_case_ ) )
A__ = self.tokenizer.pad(
snake_case_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
# Un-flatten
A__ = {k: v.view(snake_case_ , snake_case_ , -1 ) for k, v in batch.items()}
# Add back labels
A__ = torch.tensor(snake_case_ , dtype=torch.intaa )
return batch
def _SCREAMING_SNAKE_CASE ( ) -> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A__, A__, A__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A__, A__, A__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , lowercase_ , lowercase_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A__ = training_args.get_process_log_level()
logger.setLevel(lowercase_ )
datasets.utils.logging.set_verbosity(lowercase_ )
transformers.utils.logging.set_verbosity(lowercase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
A__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
A__ = {}
if data_args.train_file is not None:
A__ = data_args.train_file
if data_args.validation_file is not None:
A__ = data_args.validation_file
A__ = data_args.train_file.split("." )[-1]
A__ = load_dataset(
lowercase_ , data_files=lowercase_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
A__ = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowercase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
A__ = [f"""ending{i}""" for i in range(4 )]
A__ = "sent1"
A__ = "sent2"
if data_args.max_seq_length is None:
A__ = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
A__ = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
A__ = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowercase_ ):
A__ = [[context] * 4 for context in examples[context_name]]
A__ = examples[question_header_name]
A__ = [
[f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(lowercase_ )
]
# Flatten out
A__ = list(chain(*lowercase_ ) )
A__ = list(chain(*lowercase_ ) )
# Tokenize
A__ = tokenizer(
lowercase_ , lowercase_ , truncation=lowercase_ , max_length=lowercase_ , padding="max_length" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowercase_ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
A__ = raw_datasets["train"]
if data_args.max_train_samples is not None:
A__ = min(len(lowercase_ ) , data_args.max_train_samples )
A__ = train_dataset.select(range(lowercase_ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
A__ = train_dataset.map(
lowercase_ , batched=lowercase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
A__ = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
A__ = min(len(lowercase_ ) , data_args.max_eval_samples )
A__ = eval_dataset.select(range(lowercase_ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
A__ = eval_dataset.map(
lowercase_ , batched=lowercase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
A__ = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowercase_ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowercase_ ):
A__, A__ = eval_predictions
A__ = np.argmax(lowercase_ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
A__ = Trainer(
model=lowercase_ , args=lowercase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowercase_ , data_collator=lowercase_ , compute_metrics=lowercase_ , )
# Training
if training_args.do_train:
A__ = None
if training_args.resume_from_checkpoint is not None:
A__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A__ = last_checkpoint
A__ = trainer.train(resume_from_checkpoint=lowercase_ )
trainer.save_model() # Saves the tokenizer too for easy upload
A__ = train_result.metrics
A__ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase_ )
)
A__ = min(lowercase_ , len(lowercase_ ) )
trainer.log_metrics("train" , lowercase_ )
trainer.save_metrics("train" , lowercase_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
A__ = trainer.evaluate()
A__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase_ )
A__ = min(lowercase_ , len(lowercase_ ) )
trainer.log_metrics("eval" , lowercase_ )
trainer.save_metrics("eval" , lowercase_ )
A__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase_ )
else:
trainer.create_model_card(**lowercase_ )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 247 | 1 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case = MODEL_FOR_CAUSAL_LM_MAPPING
snake_case = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
_A = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="pt" )
# Using `do_sample=False` to force deterministic output
_A = text_generator("This is a test" , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
] , )
_A = text_generator(["This is a test", "This is a second test"] )
self.assertEqual(
__UpperCAmelCase , [
[
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
],
[
{
"generated_text": (
"This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
" oscope. oscope. FiliFili@@"
)
}
],
] , )
_A = text_generator("This is a test" , do_sample=__UpperCAmelCase , num_return_sequences=2 , return_tensors=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{"generated_token_ids": ANY(__UpperCAmelCase )},
{"generated_token_ids": ANY(__UpperCAmelCase )},
] , )
_A = text_generator.model.config.eos_token_id
_A = "<pad>"
_A = text_generator(
["This is a test", "This is a second test"] , do_sample=__UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCAmelCase , )
self.assertEqual(
__UpperCAmelCase , [
[
{"generated_token_ids": ANY(__UpperCAmelCase )},
{"generated_token_ids": ANY(__UpperCAmelCase )},
],
[
{"generated_token_ids": ANY(__UpperCAmelCase )},
{"generated_token_ids": ANY(__UpperCAmelCase )},
],
] , )
@require_tf
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_A = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="tf" )
# Using `do_sample=False` to force deterministic output
_A = text_generator("This is a test" , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
] , )
_A = text_generator(["This is a test", "This is a second test"] , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
],
[
{
"generated_text": (
"This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
" Cannes 閲閲Cannes Cannes Cannes 攵 please,"
)
}
],
] , )
def lowerCAmelCase ( self : str , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[Any] ):
'''simple docstring'''
_A = TextGenerationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
return text_generator, ["This is a test", "Another test"]
def lowerCAmelCase ( self : int ):
'''simple docstring'''
_A = "Hello I believe in"
_A = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
_A = text_generator(__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}] , )
_A = text_generator(__UpperCAmelCase , stop_sequence=" fe" )
self.assertEqual(__UpperCAmelCase , [{"generated_text": "Hello I believe in fe"}] )
def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict ):
'''simple docstring'''
_A = text_generator.model
_A = text_generator.tokenizer
_A = text_generator("This is a test" )
self.assertEqual(__UpperCAmelCase , [{"generated_text": ANY(__UpperCAmelCase )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
_A = text_generator("This is a test" , return_full_text=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , [{"generated_text": ANY(__UpperCAmelCase )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
_A = pipeline(task="text-generation" , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , return_full_text=__UpperCAmelCase )
_A = text_generator("This is a test" )
self.assertEqual(__UpperCAmelCase , [{"generated_text": ANY(__UpperCAmelCase )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
_A = text_generator("This is a test" , return_full_text=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , [{"generated_text": ANY(__UpperCAmelCase )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
_A = text_generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[{"generated_text": ANY(__UpperCAmelCase )}, {"generated_text": ANY(__UpperCAmelCase )}],
[{"generated_text": ANY(__UpperCAmelCase )}, {"generated_text": ANY(__UpperCAmelCase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
_A = text_generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[{"generated_text": ANY(__UpperCAmelCase )}, {"generated_text": ANY(__UpperCAmelCase )}],
[{"generated_text": ANY(__UpperCAmelCase )}, {"generated_text": ANY(__UpperCAmelCase )}],
] , )
with self.assertRaises(__UpperCAmelCase ):
_A = text_generator("test" , return_full_text=__UpperCAmelCase , return_text=__UpperCAmelCase )
with self.assertRaises(__UpperCAmelCase ):
_A = text_generator("test" , return_full_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
with self.assertRaises(__UpperCAmelCase ):
_A = text_generator("test" , return_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
_A = text_generator("" )
self.assertEqual(__UpperCAmelCase , [{"generated_text": ANY(__UpperCAmelCase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
_A = text_generator("" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
_A = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("This is a test" * 500 , max_new_tokens=20 )
_A = text_generator("This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(__UpperCAmelCase ):
text_generator(
"This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
import torch
# Classic `model_kwargs`
_A = pipeline(
model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_A = pipe("This is a test" )
self.assertEqual(
__UpperCAmelCase , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
_A = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_A = pipe("This is a test" )
self.assertEqual(
__UpperCAmelCase , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
_A = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
_A = pipe("This is a test" )
self.assertEqual(
__UpperCAmelCase , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
@require_torch
@require_torch_gpu
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
import torch
_A = pipeline(model="hf-internal-testing/tiny-random-bloom" , device=0 , torch_dtype=torch.floataa )
pipe("This is a test" )
@require_torch
@require_accelerate
@require_torch_gpu
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
import torch
_A = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.floataa )
pipe("This is a test" , do_sample=__UpperCAmelCase , top_p=0.5 )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
_A = "Hello world"
_A = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
if text_generator.model.framework == "tf":
_A = logging.get_logger("transformers.generation.tf_utils" )
else:
_A = logging.get_logger("transformers.generation.utils" )
_A = "Both `max_new_tokens`" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(__UpperCAmelCase ) as cl:
_A = text_generator(__UpperCAmelCase , max_length=10 , max_new_tokens=1 )
self.assertIn(__UpperCAmelCase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(__UpperCAmelCase ) as cl:
_A = text_generator(__UpperCAmelCase , max_new_tokens=1 )
self.assertNotIn(__UpperCAmelCase , cl.out )
with CaptureLogger(__UpperCAmelCase ) as cl:
_A = text_generator(__UpperCAmelCase , max_length=10 )
self.assertNotIn(__UpperCAmelCase , cl.out )
| 174 |
'''simple docstring'''
import os
lowerCamelCase_ = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 1_00, '''D''': 5_00, '''M''': 10_00}
def __lowercase ( __lowercase ) -> int:
'''simple docstring'''
_A = 0
_A = 0
while index < len(__lowercase ) - 1:
_A = SYMBOLS[numerals[index]]
_A = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def __lowercase ( __lowercase ) -> str:
'''simple docstring'''
_A = ""
_A = num // 1000
numerals += m_count * "M"
num %= 1000
_A = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
_A = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def __lowercase ( __lowercase = "/p089_roman.txt" ) -> int:
'''simple docstring'''
_A = 0
with open(os.path.dirname(__lowercase ) + roman_numerals_filename ) as filea:
_A = filea.readlines()
for line in lines:
_A = line.strip()
_A = parse_roman_numerals(__lowercase )
_A = generate_roman_numerals(__lowercase )
savings += len(__lowercase ) - len(__lowercase )
return savings
if __name__ == "__main__":
print(F"""{solution() = }""")
| 174 | 1 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A : List[Any] = logging.get_logger()
def lowercase_ ( _A : int , _A : str , _A : LevitConfig , _A : Path , _A : bool = True ):
"""simple docstring"""
print(F"Converting {name}..." )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
lowerCamelCase__ : int = timm.create_model("levit_128s" , pretrained=_A )
else:
lowerCamelCase__ : Tuple = timm.create_model("levit_128" , pretrained=_A )
if hidden_sizes == 192:
lowerCamelCase__ : Optional[Any] = timm.create_model("levit_192" , pretrained=_A )
if hidden_sizes == 256:
lowerCamelCase__ : Optional[Any] = timm.create_model("levit_256" , pretrained=_A )
if hidden_sizes == 384:
lowerCamelCase__ : Optional[int] = timm.create_model("levit_384" , pretrained=_A )
from_model.eval()
lowerCamelCase__ : Union[str, Any] = LevitForImageClassificationWithTeacher(_A ).eval()
lowerCamelCase__ : Optional[Any] = OrderedDict()
lowerCamelCase__ : Optional[Any] = from_model.state_dict()
lowerCamelCase__ : List[str] = list(from_model.state_dict().keys() )
lowerCamelCase__ : int = list(our_model.state_dict().keys() )
print(len(_A ) , len(_A ) )
for i in range(len(_A ) ):
lowerCamelCase__ : str = weights[og_keys[i]]
our_model.load_state_dict(_A )
lowerCamelCase__ : Union[str, Any] = torch.randn((2, 3, 224, 224) )
lowerCamelCase__ : Optional[int] = from_model(_A )
lowerCamelCase__ : str = our_model(_A ).logits
assert torch.allclose(_A , _A ), "The model logits don't match the original one."
lowerCamelCase__ : Dict = name
print(_A )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
lowerCamelCase__ : Dict = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F"Pushed {checkpoint_name}" )
def lowercase_ ( _A : Path , _A : str = None , _A : bool = True ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = "imagenet-1k-id2label.json"
lowerCamelCase__ : List[str] = 1000
lowerCamelCase__ : Tuple = (1, num_labels)
lowerCamelCase__ : Optional[int] = "huggingface/label-files"
lowerCamelCase__ : List[str] = num_labels
lowerCamelCase__ : List[str] = json.load(open(hf_hub_download(_A , _A , repo_type="dataset" ) , "r" ) )
lowerCamelCase__ : List[str] = {int(_A ): v for k, v in idalabel.items()}
lowerCamelCase__ : Optional[Any] = idalabel
lowerCamelCase__ : int = {v: k for k, v in idalabel.items()}
lowerCamelCase__ : Tuple = partial(_A , num_labels=_A , idalabel=_A , labelaid=_A )
lowerCamelCase__ : int = {
"levit-128S": 128,
"levit-128": 128,
"levit-192": 192,
"levit-256": 256,
"levit-384": 384,
}
lowerCamelCase__ : Any = {
"levit-128S": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"levit-128": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"levit-192": ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"levit-256": ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"levit-384": ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , _A , names_to_config[model_name] , _A , _A )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , _A , _A , _A , _A )
return config, expected_shape
if __name__ == "__main__":
A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="levit-dump-folder/",
type=Path,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
A : int = parser.parse_args()
A : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 184 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : str = logging.get_logger(__name__)
A : int = {
"alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = "mgp-str"
def __init__( self : List[str] , __lowerCamelCase : List[Any]=[32, 128] , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : str=3 , __lowerCamelCase : Optional[Any]=27 , __lowerCamelCase : List[str]=38 , __lowerCamelCase : Dict=50257 , __lowerCamelCase : List[Any]=30522 , __lowerCamelCase : Optional[Any]=768 , __lowerCamelCase : str=12 , __lowerCamelCase : Optional[int]=12 , __lowerCamelCase : List[str]=4.0 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Any=False , __lowerCamelCase : Dict=1E-5 , __lowerCamelCase : int=0.0 , __lowerCamelCase : Optional[int]=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : str=0.0_2 , **__lowerCamelCase : Dict , ):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
lowerCamelCase__ : int = image_size
lowerCamelCase__ : Union[str, Any] = patch_size
lowerCamelCase__ : Dict = num_channels
lowerCamelCase__ : Union[str, Any] = max_token_length
lowerCamelCase__ : Optional[int] = num_character_labels
lowerCamelCase__ : Union[str, Any] = num_bpe_labels
lowerCamelCase__ : Optional[int] = num_wordpiece_labels
lowerCamelCase__ : List[str] = hidden_size
lowerCamelCase__ : Dict = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : Any = mlp_ratio
lowerCamelCase__ : List[Any] = distilled
lowerCamelCase__ : Optional[Any] = layer_norm_eps
lowerCamelCase__ : Union[str, Any] = drop_rate
lowerCamelCase__ : List[Any] = qkv_bias
lowerCamelCase__ : int = attn_drop_rate
lowerCamelCase__ : List[Any] = drop_path_rate
lowerCamelCase__ : List[str] = output_aa_attentions
lowerCamelCase__ : Dict = initializer_range
| 184 | 1 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
snake_case_ =None
snake_case_ =False
snake_case_ =False
snake_case_ =False
snake_case_ =None
snake_case_ =None
snake_case_ =False
snake_case_ =False
snake_case_ =False
snake_case_ =True
snake_case_ =None
snake_case_ =1
snake_case_ =None
snake_case_ =False
snake_case_ =None
snake_case_ =None
def lowerCAmelCase__ (self ) -> "DownloadConfig":
"""simple docstring"""
return self.__class__(**{k: copy.deepcopy(__lowerCamelCase ) for k, v in self.__dict__.items()} )
| 371 |
def lowerCAmelCase__ ( lowerCamelCase_ : str = "The quick brown fox jumps over the lazy dog" ,):
'''simple docstring'''
lowerCAmelCase__ : Any = set()
# Replace all the whitespace in our sentence
lowerCAmelCase__ : List[Any] = input_str.replace(''' ''' ,'''''')
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower())
return len(lowerCamelCase_) == 26
def lowerCAmelCase__ ( lowerCamelCase_ : str = "The quick brown fox jumps over the lazy dog" ,):
'''simple docstring'''
lowerCAmelCase__ : List[str] = [False] * 26
for char in input_str:
if char.islower():
lowerCAmelCase__ : Union[str, Any] = True
elif char.isupper():
lowerCAmelCase__ : str = True
return all(lowerCamelCase_)
def lowerCAmelCase__ ( lowerCamelCase_ : str = "The quick brown fox jumps over the lazy dog" ,):
'''simple docstring'''
return len({char for char in input_str.lower() if char.isalpha()}) == 26
def lowerCAmelCase__ ( ):
'''simple docstring'''
from timeit import timeit
lowerCAmelCase__ : Optional[Any] = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit('''is_pangram()''' ,setup=lowerCamelCase_))
print(timeit('''is_pangram_faster()''' ,setup=lowerCamelCase_))
print(timeit('''is_pangram_fastest()''' ,setup=lowerCamelCase_))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 94 | 0 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def UpperCAmelCase ( lowercase , lowercase , lowercase ):
"""simple docstring"""
if isinstance(lowercase , torch.Tensor ):
return image
elif isinstance(lowercase , PIL.Image.Image ):
__lowercase = [image]
if isinstance(image[0] , PIL.Image.Image ):
__lowercase = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
__lowercase = np.concatenate(lowercase , axis=0 )
__lowercase = np.array(lowercase ).astype(np.floataa ) / 255.0
__lowercase = image.transpose(0 , 3 , 1 , 2 )
__lowercase = 2.0 * image - 1.0
__lowercase = torch.from_numpy(lowercase )
elif isinstance(image[0] , torch.Tensor ):
__lowercase = torch.cat(lowercase , dim=0 )
return image
def UpperCAmelCase ( lowercase , lowercase , lowercase , lowercase=0.9995 ):
"""simple docstring"""
if not isinstance(lowercase , np.ndarray ):
__lowercase = True
__lowercase = va.device
__lowercase = va.cpu().numpy()
__lowercase = va.cpu().numpy()
__lowercase = np.sum(va * va / (np.linalg.norm(lowercase ) * np.linalg.norm(lowercase )) )
if np.abs(lowercase ) > DOT_THRESHOLD:
__lowercase = (1 - t) * va + t * va
else:
__lowercase = np.arccos(lowercase )
__lowercase = np.sin(lowercase )
__lowercase = theta_a * t
__lowercase = np.sin(lowercase )
__lowercase = np.sin(theta_a - theta_t ) / sin_theta_a
__lowercase = sin_theta_t / sin_theta_a
__lowercase = sa * va + sa * va
if inputs_are_torch:
__lowercase = torch.from_numpy(lowercase ).to(lowercase )
return va
def UpperCAmelCase ( lowercase , lowercase ):
"""simple docstring"""
__lowercase = F.normalize(lowercase , dim=-1 )
__lowercase = F.normalize(lowercase , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def UpperCAmelCase ( lowercase , lowercase ):
"""simple docstring"""
for param in model.parameters():
__lowercase = value
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , ) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(
vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , clip_model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , coca_model=lowerCAmelCase__ , coca_tokenizer=lowerCAmelCase__ , coca_transform=lowerCAmelCase__ , )
__lowercase = (
feature_extractor.size
if isinstance(feature_extractor.size , lowerCAmelCase__ )
else feature_extractor.size['''shortest_edge''']
)
__lowercase = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , lowerCAmelCase__ )
set_requires_grad(self.clip_model , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ = "auto" ) -> Optional[Any]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
self.enable_attention_slicing(lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
set_requires_grad(self.vae , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
set_requires_grad(self.vae , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
set_requires_grad(self.unet , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
set_requires_grad(self.unet , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
'''simple docstring'''
__lowercase = min(int(num_inference_steps * strength ) , lowerCAmelCase__ )
__lowercase = max(num_inference_steps - init_timestep , 0 )
__lowercase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None ) -> Optional[Any]:
'''simple docstring'''
if not isinstance(lowerCAmelCase__ , torch.Tensor ):
raise ValueError(F"`image` has to be of type `torch.Tensor` but is {type(lowerCAmelCase__ )}" )
__lowercase = image.to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__lowercase = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowerCAmelCase__ )
]
__lowercase = torch.cat(lowerCAmelCase__ , dim=0 )
else:
__lowercase = self.vae.encode(lowerCAmelCase__ ).latent_dist.sample(lowerCAmelCase__ )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__lowercase = 0.1_8215 * init_latents
__lowercase = init_latents.repeat_interleave(lowerCAmelCase__ , dim=0 )
__lowercase = randn_tensor(init_latents.shape , generator=lowerCAmelCase__ , device=lowerCAmelCase__ , dtype=lowerCAmelCase__ )
# get latents
__lowercase = self.scheduler.add_noise(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = init_latents
return latents
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.coca_transform(lowerCAmelCase__ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
__lowercase = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
__lowercase = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
'''simple docstring'''
__lowercase = self.feature_extractor.preprocess(lowerCAmelCase__ )
__lowercase = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
__lowercase = self.clip_model.get_image_features(lowerCAmelCase__ )
__lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowerCAmelCase__ )
__lowercase = image_embeddings_clip.repeat_interleave(lowerCAmelCase__ , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> List[Any]:
'''simple docstring'''
__lowercase = latents.detach().requires_grad_()
__lowercase = self.scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
# predict the noise residual
__lowercase = self.unet(lowerCAmelCase__ , lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
__lowercase = self.scheduler.alphas_cumprod[timestep]
__lowercase = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowercase = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
__lowercase = torch.sqrt(lowerCAmelCase__ )
__lowercase = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , lowerCAmelCase__ ):
__lowercase = self.scheduler.sigmas[index]
__lowercase = latents - sigma * noise_pred
else:
raise ValueError(F"scheduler type {type(self.scheduler )} not supported" )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__lowercase = 1 / 0.1_8215 * sample
__lowercase = self.vae.decode(lowerCAmelCase__ ).sample
__lowercase = (image / 2 + 0.5).clamp(0 , 1 )
__lowercase = transforms.Resize(self.feature_extractor_size )(lowerCAmelCase__ )
__lowercase = self.normalize(lowerCAmelCase__ ).to(latents.dtype )
__lowercase = self.clip_model.get_image_features(lowerCAmelCase__ )
__lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowerCAmelCase__ )
__lowercase = spherical_dist_loss(lowerCAmelCase__ , lowerCAmelCase__ ).mean() * clip_guidance_scale
__lowercase = -torch.autograd.grad(lowerCAmelCase__ , lowerCAmelCase__ )[0]
if isinstance(self.scheduler , lowerCAmelCase__ ):
__lowercase = latents.detach() + grads * (sigma**2)
__lowercase = noise_pred_original
else:
__lowercase = noise_pred_original - torch.sqrt(lowerCAmelCase__ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 5_12 , lowerCAmelCase__ = 5_12 , lowerCAmelCase__ = 0.6 , lowerCAmelCase__ = 50 , lowerCAmelCase__ = 7.5 , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = 1_00 , lowerCAmelCase__ = None , lowerCAmelCase__ = "pil" , lowerCAmelCase__ = True , lowerCAmelCase__ = 0.8 , lowerCAmelCase__ = 0.1 , lowerCAmelCase__ = 0.1 , ) -> List[Any]:
'''simple docstring'''
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) != batch_size:
raise ValueError(F"You have passed {batch_size} batch_size, but only {len(lowerCAmelCase__ )} generators." )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if isinstance(lowerCAmelCase__ , torch.Generator ) and batch_size > 1:
__lowercase = [generator] + [None] * (batch_size - 1)
__lowercase = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
__lowercase = [x[0] for x in coca_is_none if x[1]]
__lowercase = ''', '''.join(lowerCAmelCase__ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(lowerCAmelCase__ ):
raise ValueError(
F"Content prompt is None and CoCa [{coca_is_none_str}] is None."
F"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
__lowercase = self.get_image_description(lowerCAmelCase__ )
if style_prompt is None:
if len(lowerCAmelCase__ ):
raise ValueError(
F"Style prompt is None and CoCa [{coca_is_none_str}] is None."
F" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
__lowercase = self.get_image_description(lowerCAmelCase__ )
# get prompt text embeddings for content and style
__lowercase = self.tokenizer(
lowerCAmelCase__ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=lowerCAmelCase__ , return_tensors='''pt''' , )
__lowercase = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__lowercase = self.tokenizer(
lowerCAmelCase__ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=lowerCAmelCase__ , return_tensors='''pt''' , )
__lowercase = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__lowercase = slerp(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# duplicate text embeddings for each generation per prompt
__lowercase = text_embeddings.repeat_interleave(lowerCAmelCase__ , dim=0 )
# set timesteps
__lowercase = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__lowercase = {}
if accepts_offset:
__lowercase = 1
self.scheduler.set_timesteps(lowerCAmelCase__ , **lowerCAmelCase__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
__lowercase , __lowercase = self.get_timesteps(lowerCAmelCase__ , lowerCAmelCase__ , self.device )
__lowercase = timesteps[:1].repeat(lowerCAmelCase__ )
# Preprocess image
__lowercase = preprocess(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = self.prepare_latents(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , text_embeddings.dtype , self.device , lowerCAmelCase__ )
__lowercase = preprocess(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = self.prepare_latents(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , text_embeddings.dtype , self.device , lowerCAmelCase__ )
__lowercase = slerp(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if clip_guidance_scale > 0:
__lowercase = self.get_clip_image_embeddings(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = self.get_clip_image_embeddings(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = slerp(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__lowercase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__lowercase = content_text_input.input_ids.shape[-1]
__lowercase = self.tokenizer([''''''] , padding='''max_length''' , max_length=lowerCAmelCase__ , return_tensors='''pt''' )
__lowercase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__lowercase = uncond_embeddings.repeat_interleave(lowerCAmelCase__ , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowercase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__lowercase = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__lowercase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__lowercase = torch.randn(lowerCAmelCase__ , generator=lowerCAmelCase__ , device='''cpu''' , dtype=lowerCAmelCase__ ).to(
self.device )
else:
__lowercase = torch.randn(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=self.device , dtype=lowerCAmelCase__ )
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
__lowercase = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__lowercase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__lowercase = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__lowercase = {}
if accepts_eta:
__lowercase = eta
# check if the scheduler accepts generator
__lowercase = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__lowercase = generator
with self.progress_bar(total=lowerCAmelCase__ ):
for i, t in enumerate(lowerCAmelCase__ ):
# expand the latents if we are doing classifier free guidance
__lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowercase = self.scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
# predict the noise residual
__lowercase = self.unet(lowerCAmelCase__ , lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__lowercase , __lowercase = noise_pred.chunk(2 )
__lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__lowercase = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__lowercase , __lowercase = self.cond_fn(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
# compute the previous noisy sample x_t -> x_t-1
__lowercase = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__lowercase = 1 / 0.1_8215 * latents
__lowercase = self.vae.decode(lowerCAmelCase__ ).sample
__lowercase = (image / 2 + 0.5).clamp(0 , 1 )
__lowercase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowercase = self.numpy_to_pil(lowerCAmelCase__ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=lowerCAmelCase__ , nsfw_content_detected=lowerCAmelCase__ ) | 210 | from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__a : Optional[Any] = logging.get_logger(__name__)
__a : List[str] = TypeVar("""DatasetType""", Dataset, IterableDataset)
def UpperCAmelCase ( lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = "first_exhausted" , ):
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('''Unable to interleave an empty list of datasets.''' )
for i, dataset in enumerate(lowercase ):
if not isinstance(lowercase , (Dataset, IterableDataset) ):
if isinstance(lowercase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'''is an empty dataset dictionary.''' )
raise ValueError(
F"Dataset at position {i} has at least one split: {list(lowercase )}\n"
F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(lowercase ) )}']" )
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(lowercase ).__name__}." )
if i == 0:
__lowercase , __lowercase = (
(Dataset, IterableDataset) if isinstance(lowercase , lowercase ) else (IterableDataset, Dataset)
)
elif not isinstance(lowercase , lowercase ):
raise ValueError(
F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
lowercase , lowercase , lowercase , info=lowercase , split=lowercase , stopping_strategy=lowercase )
else:
return _interleave_iterable_datasets(
lowercase , lowercase , lowercase , info=lowercase , split=lowercase , stopping_strategy=lowercase )
def UpperCAmelCase ( lowercase , lowercase = None , lowercase = None , lowercase = 0 , ):
"""simple docstring"""
if not dsets:
raise ValueError('''Unable to concatenate an empty list of datasets.''' )
for i, dataset in enumerate(lowercase ):
if not isinstance(lowercase , (Dataset, IterableDataset) ):
if isinstance(lowercase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'''is an empty dataset dictionary.''' )
raise ValueError(
F"Dataset at position {i} has at least one split: {list(lowercase )}\n"
F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(lowercase ) )}']" )
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(lowercase ).__name__}." )
if i == 0:
__lowercase , __lowercase = (
(Dataset, IterableDataset) if isinstance(lowercase , lowercase ) else (IterableDataset, Dataset)
)
elif not isinstance(lowercase , lowercase ):
raise ValueError(
F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(lowercase , info=lowercase , split=lowercase , axis=lowercase )
else:
return _concatenate_iterable_datasets(lowercase , info=lowercase , split=lowercase , axis=lowercase ) | 210 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCamelCase : Optional[Any] = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class __lowerCAmelCase ( _UpperCAmelCase ):
lowercase = "levit"
def __init__( self , __UpperCAmelCase=224 , __UpperCAmelCase=3 , __UpperCAmelCase=3 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=16 , __UpperCAmelCase=[128, 256, 384] , __UpperCAmelCase=[4, 8, 12] , __UpperCAmelCase=[4, 4, 4] , __UpperCAmelCase=[16, 16, 16] , __UpperCAmelCase=0 , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=0.0_2 , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**lowercase_ )
__UpperCamelCase = image_size
__UpperCamelCase = num_channels
__UpperCamelCase = kernel_size
__UpperCamelCase = stride
__UpperCamelCase = padding
__UpperCamelCase = hidden_sizes
__UpperCamelCase = num_attention_heads
__UpperCamelCase = depths
__UpperCamelCase = key_dim
__UpperCamelCase = drop_path_rate
__UpperCamelCase = patch_size
__UpperCamelCase = attention_ratio
__UpperCamelCase = mlp_ratio
__UpperCamelCase = initializer_range
__UpperCamelCase = [
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class __lowerCAmelCase ( _UpperCAmelCase ):
lowercase = version.parse("1.11" )
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return 1E-4
| 371 |
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
__UpperCAmelCase , split=__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase , streaming=__UpperCAmelCase , num_proc=__UpperCAmelCase , **__UpperCAmelCase , )
__UpperCamelCase = field
__UpperCamelCase = path_or_paths if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else {self.split: path_or_paths}
__UpperCamelCase = Json(
cache_dir=__UpperCAmelCase , data_files=__UpperCAmelCase , features=__UpperCAmelCase , field=__UpperCAmelCase , **__UpperCAmelCase , )
def UpperCAmelCase ( self ):
'''simple docstring'''
if self.streaming:
__UpperCamelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
self.builder.download_and_prepare(
download_config=__UpperCAmelCase , download_mode=__UpperCAmelCase , verification_mode=__UpperCAmelCase , base_path=__UpperCAmelCase , num_proc=self.num_proc , )
__UpperCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=__UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(F'num_proc {num_proc} must be an integer > 0.' )
__UpperCamelCase = dataset
__UpperCamelCase = path_or_buf
__UpperCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__UpperCamelCase = num_proc
__UpperCamelCase = 'utf-8'
__UpperCamelCase = to_json_kwargs
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.to_json_kwargs.pop('path_or_buf' , __UpperCAmelCase )
__UpperCamelCase = self.to_json_kwargs.pop('orient' , 'records' )
__UpperCamelCase = self.to_json_kwargs.pop('lines' , True if orient == 'records' else False )
__UpperCamelCase = self.to_json_kwargs.pop('index' , False if orient in ['split', 'table'] else True )
__UpperCamelCase = self.to_json_kwargs.pop('compression' , __UpperCAmelCase )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F'`datasets` currently does not support {compression} compression' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , 'wb' , compression=__UpperCAmelCase ) as buffer:
__UpperCamelCase = self._write(file_obj=__UpperCAmelCase , orient=__UpperCAmelCase , lines=__UpperCAmelCase , index=__UpperCAmelCase , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F'The compression parameter is not supported when writing to a buffer, but compression={compression}'
' was passed. Please provide a local path instead.' )
__UpperCamelCase = self._write(
file_obj=self.path_or_buf , orient=__UpperCAmelCase , lines=__UpperCAmelCase , index=__UpperCAmelCase , **self.to_json_kwargs )
return written
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = args
__UpperCamelCase = query_table(
table=self.dataset.data , key=slice(__UpperCAmelCase , offset + self.batch_size ) , indices=self.dataset._indices , )
__UpperCamelCase = batch.to_pandas().to_json(
path_or_buf=__UpperCAmelCase , orient=__UpperCAmelCase , lines=__UpperCAmelCase , index=__UpperCAmelCase , **__UpperCAmelCase )
if not json_str.endswith('\n' ):
json_str += "\n"
return json_str.encode(self.encoding )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ):
__UpperCamelCase = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(__UpperCAmelCase )
else:
__UpperCamelCase , __UpperCamelCase = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , __UpperCAmelCase , __UpperCAmelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ):
written += file_obj.write(__UpperCAmelCase )
return written
| 263 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=7 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=99 , UpperCamelCase=32 , UpperCamelCase=5 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=16 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=4 , ):
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_attention_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_choices
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_attention_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = True
lowerCamelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class snake_case ( lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = FlaxRobertaModelTester(self )
@slow
def snake_case ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowerCamelCase_ = model_class_name.from_pretrained("roberta-base" , from_pt=UpperCamelCase )
lowerCamelCase_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase )
| 55 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __snake_case ( ):
lowerCamelCase_ = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=UpperCAmelCase_ )
lowerCamelCase_ = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=UpperCAmelCase_ )
env_command_parser(subparsers=UpperCAmelCase_ )
launch_command_parser(subparsers=UpperCAmelCase_ )
tpu_command_parser(subparsers=UpperCAmelCase_ )
test_command_parser(subparsers=UpperCAmelCase_ )
# Let's go
lowerCamelCase_ = parser.parse_args()
if not hasattr(UpperCAmelCase_ , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(UpperCAmelCase_ )
if __name__ == "__main__":
main()
| 55 | 1 |
"""simple docstring"""
def _lowerCAmelCase ( lowercase_ ):
return str(lowercase_ ) == str(lowercase_ )[::-1]
def _lowerCAmelCase ( lowercase_ ):
return int(lowercase_ ) + int(str(lowercase_ )[::-1] )
def _lowerCAmelCase ( lowercase_ = 10000 ):
UpperCAmelCase = []
for num in range(1 , lowercase_ ):
UpperCAmelCase = 0
UpperCAmelCase = num
while iterations < 50:
UpperCAmelCase = sum_reverse(lowercase_ )
iterations += 1
if is_palindrome(lowercase_ ):
break
else:
lychrel_nums.append(lowercase_ )
return len(lowercase_ )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 181 |
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
snake_case_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self :Optional[Any] , lowercase_ :CLIPSegForImageSegmentation , lowercase_ :CLIPSegProcessor , lowercase_ :AutoencoderKL , lowercase_ :CLIPTextModel , lowercase_ :CLIPTokenizer , lowercase_ :UNetaDConditionModel , lowercase_ :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowercase_ :StableDiffusionSafetyChecker , lowercase_ :CLIPImageProcessor , ) -> List[str]:
super().__init__()
if hasattr(scheduler.config , 'steps_offset' ) and scheduler.config.steps_offset != 1:
UpperCAmelCase = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
'to update the config accordingly as leaving `steps_offset` might led to incorrect results'
' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'
' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'
' file'
)
deprecate('steps_offset!=1' , '1.0.0' , lowercase_ , standard_warn=lowercase_ )
UpperCAmelCase = dict(scheduler.config )
UpperCAmelCase = 1
UpperCAmelCase = FrozenDict(lowercase_ )
if hasattr(scheduler.config , 'skip_prk_steps' ) and scheduler.config.skip_prk_steps is False:
UpperCAmelCase = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'
' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'
' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'
' Hub, it would be very nice if you could open a Pull request for the'
' `scheduler/scheduler_config.json` file'
)
deprecate('skip_prk_steps not set' , '1.0.0' , lowercase_ , standard_warn=lowercase_ )
UpperCAmelCase = dict(scheduler.config )
UpperCAmelCase = True
UpperCAmelCase = FrozenDict(lowercase_ )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
segmentation_model=lowercase_ , segmentation_processor=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ , feature_extractor=lowercase_ , )
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Optional[Union[str, int]] = "auto" ) -> int:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_ )
def UpperCAmelCase__ ( self :Optional[int] ) -> List[str]:
self.enable_attention_slicing(lowercase_ )
def UpperCAmelCase__ ( self :int ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase = torch.device('cuda' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(lowercase_ , lowercase_ )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase__ ( self :Optional[Any] ) -> List[str]:
if self.device != torch.device('meta' ) or not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase_ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self :Optional[Any] , lowercase_ :Union[str, List[str]] , lowercase_ :Union[torch.FloatTensor, PIL.Image.Image] , lowercase_ :str , lowercase_ :int = 5_12 , lowercase_ :int = 5_12 , lowercase_ :int = 50 , lowercase_ :float = 7.5 , lowercase_ :Optional[Union[str, List[str]]] = None , lowercase_ :Optional[int] = 1 , lowercase_ :float = 0.0 , lowercase_ :Optional[torch.Generator] = None , lowercase_ :Optional[torch.FloatTensor] = None , lowercase_ :Optional[str] = "pil" , lowercase_ :bool = True , lowercase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ :int = 1 , **lowercase_ :int , ) -> int:
UpperCAmelCase = self.segmentation_processor(
text=[text] , images=[image] , padding='max_length' , return_tensors='pt' ).to(self.device )
UpperCAmelCase = self.segmentation_model(**lowercase_ )
UpperCAmelCase = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
UpperCAmelCase = self.numpy_to_pil(lowercase_ )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
UpperCAmelCase = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , )
| 181 | 1 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowerCAmelCase_ ( )-> int:
'''simple docstring'''
UpperCAmelCase : str ={
'''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
'''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
'''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7],
}
UpperCAmelCase : Union[str, Any] =Dataset.from_dict(__lowerCAmelCase )
return dataset
class __snake_case ( lowerCamelCase__ ):
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : List[str] =get_dataset()
UpperCAmelCase : Optional[int] =make_duplicate_clusters(snake_case__ , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : str =get_dataset()
UpperCAmelCase , UpperCAmelCase : Tuple =deduplicate_dataset(snake_case__ )
self.assertEqual(len(snake_case__ ) , 2 )
print(snake_case__ )
self.assertEqual(duplicate_clusters[0][0]['''copies'''] , 2 )
self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''] , snake_case__ )
| 348 | __snake_case = '''Input must be a string of 8 numbers plus letter'''
__snake_case = '''TRWAGMYFPDXBNJZSQVHLCKE'''
def lowerCAmelCase_ ( __lowerCAmelCase )-> bool:
'''simple docstring'''
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
UpperCAmelCase : Optional[Any] =f'''Expected string as input, found {type(__lowerCAmelCase ).__name__}'''
raise TypeError(__lowerCAmelCase )
UpperCAmelCase : List[Any] =spanish_id.replace('''-''' , '''''' ).upper()
if len(__lowerCAmelCase ) != 9:
raise ValueError(__lowerCAmelCase )
try:
UpperCAmelCase : int =int(spanish_id_clean[0:8] )
UpperCAmelCase : Optional[int] =spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__lowerCAmelCase ) from ex
if letter.isdigit():
raise ValueError(__lowerCAmelCase )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 348 | 1 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class A_ ( _lowerCamelCase ):
def _lowerCAmelCase (self :Union[str, Any] )-> str:
__A = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCamelCase , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(_UpperCamelCase , '''num_attention_heads''' ) )
self.parent.assertTrue(hasattr(_UpperCamelCase , '''num_encoder_blocks''' ) )
class A_ :
def __init__(self :int , _UpperCamelCase :List[Any] , _UpperCamelCase :Optional[Any]=13 , _UpperCamelCase :str=64 , _UpperCamelCase :Optional[int]=3 , _UpperCamelCase :List[Any]=4 , _UpperCamelCase :int=[2, 2, 2, 2] , _UpperCamelCase :Dict=[8, 4, 2, 1] , _UpperCamelCase :Optional[int]=[16, 32, 64, 128] , _UpperCamelCase :List[Any]=[1, 4, 8, 16] , _UpperCamelCase :List[Any]=[1, 2, 4, 8] , _UpperCamelCase :Dict=True , _UpperCamelCase :int=True , _UpperCamelCase :str="gelu" , _UpperCamelCase :Tuple=0.1 , _UpperCamelCase :Tuple=0.1 , _UpperCamelCase :Tuple=0.0_2 , _UpperCamelCase :int=3 , _UpperCamelCase :int=None , )-> Optional[Any]:
__A = parent
__A = batch_size
__A = image_size
__A = num_channels
__A = num_encoder_blocks
__A = sr_ratios
__A = depths
__A = hidden_sizes
__A = downsampling_rates
__A = num_attention_heads
__A = is_training
__A = use_labels
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = initializer_range
__A = num_labels
__A = scope
def _lowerCAmelCase (self :Optional[int] )-> Optional[int]:
__A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__A = self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase (self :List[str] )-> Optional[Any]:
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _lowerCAmelCase (self :Optional[Any] , _UpperCamelCase :Tuple , _UpperCamelCase :List[Any] , _UpperCamelCase :List[Any] )-> Optional[int]:
__A = SegformerModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
__A = model(_UpperCamelCase )
__A = __A = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def _lowerCAmelCase (self :Dict , _UpperCamelCase :int , _UpperCamelCase :Tuple , _UpperCamelCase :int )-> str:
__A = self.num_labels
__A = SegformerForSemanticSegmentation(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
__A = model(_UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
__A = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def _lowerCAmelCase (self :Optional[Any] , _UpperCamelCase :Any , _UpperCamelCase :Dict , _UpperCamelCase :Optional[int] )-> List[Any]:
__A = 1
__A = SegformerForSemanticSegmentation(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
__A = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(_UpperCamelCase )
__A = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertGreater(result.loss , 0.0 )
def _lowerCAmelCase (self :str )-> Dict:
__A = self.prepare_config_and_inputs()
__A , __A , __A = config_and_inputs
__A = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase__ = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
"""feature-extraction""": SegformerModel,
"""image-classification""": SegformerForImageClassification,
"""image-segmentation""": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowerCAmelCase (self :List[str] )-> int:
__A = SegformerModelTester(self )
__A = SegformerConfigTester(self , config_class=_UpperCamelCase )
def _lowerCAmelCase (self :Any )-> int:
self.config_tester.run_common_tests()
def _lowerCAmelCase (self :int )-> List[Any]:
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def _lowerCAmelCase (self :Optional[Any] )-> Dict:
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*_UpperCamelCase )
def _lowerCAmelCase (self :List[str] )-> Optional[Any]:
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*_UpperCamelCase )
@unittest.skip('''SegFormer does not use inputs_embeds''' )
def _lowerCAmelCase (self :Union[str, Any] )-> Optional[int]:
pass
@unittest.skip('''SegFormer does not have get_input_embeddings method and get_output_embeddings methods''' )
def _lowerCAmelCase (self :Optional[Any] )-> str:
pass
def _lowerCAmelCase (self :Optional[int] )-> List[Any]:
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = model_class(_UpperCamelCase )
__A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A = [*signature.parameters.keys()]
__A = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def _lowerCAmelCase (self :Tuple )-> str:
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
__A = True
for model_class in self.all_model_classes:
__A = True
__A = False
__A = True
__A = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
__A = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
__A = outputs.attentions
__A = sum(self.model_tester.depths )
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__A = True
__A = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
__A = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
__A = outputs.attentions
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
# verify the first attentions (first block, first layer)
__A = (self.model_tester.image_size // 4) ** 2
__A = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
__A = (self.model_tester.image_size // 32) ** 2
__A = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
__A = len(_UpperCamelCase )
# Check attention is always last and order is fine
__A = True
__A = True
__A = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
__A = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
self.assertEqual(out_len + 1 , len(_UpperCamelCase ) )
__A = outputs.attentions
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
# verify the first attentions (first block, first layer)
__A = (self.model_tester.image_size // 4) ** 2
__A = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def _lowerCAmelCase (self :List[str] )-> Optional[int]:
def check_hidden_states_output(_UpperCamelCase :Tuple , _UpperCamelCase :Any , _UpperCamelCase :Tuple ):
__A = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
__A = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
__A = outputs.hidden_states
__A = self.model_tester.num_encoder_blocks
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def _lowerCAmelCase (self :Dict )-> str:
if not self.model_tester.is_training:
return
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
__A = True
for model_class in self.all_model_classes:
if model_class in get_values(_UpperCamelCase ):
continue
__A = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.train()
__A = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase )
__A = model(**_UpperCamelCase ).loss
loss.backward()
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _lowerCAmelCase (self :Union[str, Any] )-> Any:
pass
@slow
def _lowerCAmelCase (self :Tuple )-> List[Any]:
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = SegformerModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def _a ( ) -> Union[str, Any]:
'''simple docstring'''
__A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class A_ ( unittest.TestCase ):
@slow
def _lowerCAmelCase (self :Optional[Any] )-> List[Any]:
# only resize + normalize
__A = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_UpperCamelCase , align=_UpperCamelCase , do_random_crop=_UpperCamelCase )
__A = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
_UpperCamelCase )
__A = prepare_img()
__A = image_processor(images=_UpperCamelCase , return_tensors='''pt''' )
__A = encoded_inputs.pixel_values.to(_UpperCamelCase )
with torch.no_grad():
__A = model(_UpperCamelCase )
__A = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
__A = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
def _lowerCAmelCase (self :List[Any] )-> List[str]:
# only resize + normalize
__A = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_UpperCamelCase , align=_UpperCamelCase , do_random_crop=_UpperCamelCase )
__A = SegformerForSemanticSegmentation.from_pretrained(
'''nvidia/segformer-b1-finetuned-cityscapes-1024-1024''' ).to(_UpperCamelCase )
__A = prepare_img()
__A = image_processor(images=_UpperCamelCase , return_tensors='''pt''' )
__A = encoded_inputs.pixel_values.to(_UpperCamelCase )
with torch.no_grad():
__A = model(_UpperCamelCase )
__A = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
__A = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _UpperCamelCase , atol=1e-1 ) )
@slow
def _lowerCAmelCase (self :Any )-> Any:
# only resize + normalize
__A = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_UpperCamelCase , align=_UpperCamelCase , do_random_crop=_UpperCamelCase )
__A = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
_UpperCamelCase )
__A = prepare_img()
__A = image_processor(images=_UpperCamelCase , return_tensors='''pt''' )
__A = encoded_inputs.pixel_values.to(_UpperCamelCase )
with torch.no_grad():
__A = model(_UpperCamelCase )
__A = outputs.logits.detach().cpu()
__A = image_processor.post_process_semantic_segmentation(outputs=_UpperCamelCase , target_sizes=[(500, 300)] )
__A = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , _UpperCamelCase )
__A = image_processor.post_process_semantic_segmentation(outputs=_UpperCamelCase )
__A = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , _UpperCamelCase )
| 250 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : str = logging.get_logger(__name__)
snake_case__ : Optional[int] = {
'google/pix2struct-textcaps-base': (
'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
),
}
class A_ ( _lowerCamelCase ):
lowerCAmelCase__ = """pix2struct_text_model"""
lowerCAmelCase__ = ["""past_key_values"""]
lowerCAmelCase__ = {
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__(self :Any , _UpperCamelCase :int=5_0244 , _UpperCamelCase :Optional[Any]=768 , _UpperCamelCase :Optional[Any]=64 , _UpperCamelCase :Dict=2048 , _UpperCamelCase :int=12 , _UpperCamelCase :Optional[int]=12 , _UpperCamelCase :Optional[int]=32 , _UpperCamelCase :Dict=128 , _UpperCamelCase :Tuple=0.1 , _UpperCamelCase :List[str]=1e-6 , _UpperCamelCase :Optional[Any]=1.0 , _UpperCamelCase :Union[str, Any]="gelu_new" , _UpperCamelCase :int=0 , _UpperCamelCase :int=False , _UpperCamelCase :int=0 , _UpperCamelCase :Dict=1 , _UpperCamelCase :Any=False , _UpperCamelCase :Optional[Any]=True , **_UpperCamelCase :Tuple , )-> Dict:
__A = vocab_size
__A = hidden_size
__A = d_kv
__A = d_ff
__A = num_layers
__A = num_heads
__A = relative_attention_num_buckets
__A = relative_attention_max_distance
__A = dropout_rate
__A = layer_norm_epsilon
__A = initializer_factor
__A = use_cache
__A = eos_token_id
__A = decoder_start_token_id
# for backwards compatibility
__A = dense_act_fn
super().__init__(
pad_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , decoder_start_token_id=_UpperCamelCase , tie_word_embeddings=_UpperCamelCase , is_decoder=_UpperCamelCase , **_UpperCamelCase , )
@classmethod
def _lowerCAmelCase (cls :List[str] , _UpperCamelCase :Union[str, os.PathLike] , **_UpperCamelCase :List[Any] )-> "PretrainedConfig":
cls._set_token_in_kwargs(_UpperCamelCase )
__A , __A = cls.get_config_dict(_UpperCamelCase , **_UpperCamelCase )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
__A = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCamelCase , **_UpperCamelCase )
class A_ ( _lowerCamelCase ):
lowerCAmelCase__ = """pix2struct_vision_model"""
def __init__(self :Dict , _UpperCamelCase :Optional[Any]=768 , _UpperCamelCase :List[str]=768 , _UpperCamelCase :Any=2048 , _UpperCamelCase :Tuple=64 , _UpperCamelCase :int=12 , _UpperCamelCase :Optional[int]=12 , _UpperCamelCase :Tuple="gelu_new" , _UpperCamelCase :Dict=1e-6 , _UpperCamelCase :int=0.0 , _UpperCamelCase :int=0.0 , _UpperCamelCase :Union[str, Any]=1e-10 , _UpperCamelCase :Tuple=1.0 , _UpperCamelCase :Tuple=4096 , _UpperCamelCase :List[str]=32 , _UpperCamelCase :Optional[Any]=128 , **_UpperCamelCase :List[str] , )-> Any:
super().__init__(**_UpperCamelCase )
__A = hidden_size
__A = patch_embed_hidden_size
__A = d_ff
__A = dropout_rate
__A = num_hidden_layers
__A = num_attention_heads
__A = initializer_range
__A = initializer_factor
__A = attention_dropout
__A = layer_norm_eps
__A = dense_act_fn
__A = seq_len
__A = relative_attention_num_buckets
__A = relative_attention_max_distance
__A = d_kv
@classmethod
def _lowerCAmelCase (cls :List[str] , _UpperCamelCase :Union[str, os.PathLike] , **_UpperCamelCase :List[str] )-> "PretrainedConfig":
cls._set_token_in_kwargs(_UpperCamelCase )
__A , __A = cls.get_config_dict(_UpperCamelCase , **_UpperCamelCase )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
__A = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCamelCase , **_UpperCamelCase )
class A_ ( _lowerCamelCase ):
lowerCAmelCase__ = """pix2struct"""
lowerCAmelCase__ = True
def __init__(self :List[Any] , _UpperCamelCase :str=None , _UpperCamelCase :int=None , _UpperCamelCase :List[Any]=1.0 , _UpperCamelCase :int=0.0_2 , _UpperCamelCase :List[str]=False , _UpperCamelCase :Optional[Any]=False , _UpperCamelCase :int=True , **_UpperCamelCase :Any , )-> Optional[Any]:
super().__init__(tie_word_embeddings=_UpperCamelCase , is_encoder_decoder=_UpperCamelCase , **_UpperCamelCase )
if text_config is None:
__A = {}
logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' )
if vision_config is None:
__A = {}
logger.info('''vision_config is None. Initializing the Pix2StructVisionConfig with default values.''' )
__A = PixaStructTextConfig(**_UpperCamelCase )
__A = PixaStructVisionConfig(**_UpperCamelCase )
__A = self.text_config.decoder_start_token_id
__A = self.text_config.pad_token_id
__A = self.text_config.eos_token_id
__A = initializer_factor
__A = initializer_range
__A = self.initializer_range
__A = self.initializer_range
__A = is_vqa
@classmethod
def _lowerCAmelCase (cls :str , _UpperCamelCase :PixaStructTextConfig , _UpperCamelCase :PixaStructVisionConfig , **_UpperCamelCase :Union[str, Any] )-> List[str]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_UpperCamelCase )
def _lowerCAmelCase (self :Union[str, Any] )-> int:
__A = copy.deepcopy(self.__dict__ )
__A = self.text_config.to_dict()
__A = self.vision_config.to_dict()
__A = self.__class__.model_type
return output
| 250 | 1 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class A__ ( _snake_case ):
lowercase = "WhisperFeatureExtractor"
lowercase = "WhisperTokenizer"
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
A_ = self.feature_extractor
A_ = False
def snake_case_ ( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=True ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.get_decoder_prompt_ids(task=UpperCamelCase__ , language=UpperCamelCase__ , no_timestamps=UpperCamelCase__ )
def __call__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*UpperCamelCase__ , **UpperCamelCase__ )
A_ = kwargs.pop("""audio""" , UpperCamelCase__ )
A_ = kwargs.pop("""sampling_rate""" , UpperCamelCase__ )
A_ = kwargs.pop("""text""" , UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
A_ = args[0]
A_ = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
A_ = self.feature_extractor(UpperCamelCase__ , *UpperCamelCase__ , sampling_rate=UpperCamelCase__ , **UpperCamelCase__ )
if text is not None:
A_ = self.tokenizer(UpperCamelCase__ , **UpperCamelCase__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
A_ = encodings["""input_ids"""]
return inputs
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__="np" ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.get_prompt_ids(UpperCamelCase__ , return_tensors=UpperCamelCase__ )
| 162 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> list[list[float]]:
A_ = []
for data in source_data:
for i, el in enumerate(UpperCAmelCase__ ):
if len(UpperCAmelCase__ ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(UpperCAmelCase__ ) )
return data_lists
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> list[list[float]]:
A_ = []
for dlist, weight in zip(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = min(UpperCAmelCase__ )
A_ = max(UpperCAmelCase__ )
A_ = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
A_ = F'''Invalid weight of {weight:f} provided'''
raise ValueError(UpperCAmelCase__ )
score_lists.append(UpperCAmelCase__ )
return score_lists
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> list[float]:
A_ = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(UpperCAmelCase__ ):
A_ = final_scores[j] + ele
return final_scores
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> list[list[float]]:
A_ = get_data(UpperCAmelCase__ )
A_ = calculate_each_score(UpperCAmelCase__, UpperCAmelCase__ )
A_ = generate_final_scores(UpperCAmelCase__ )
# append scores to source data
for i, ele in enumerate(UpperCAmelCase__ ):
source_data[i].append(UpperCAmelCase__ )
return source_data
| 162 | 1 |
def A ( _lowercase , _lowercase = False ):
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE : Dict = f"""Expected string as input, found {type(SCREAMING_SNAKE_CASE__ )}"""
raise ValueError(SCREAMING_SNAKE_CASE__ )
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE : int = f"""Expected boolean as use_pascal parameter, found {type(SCREAMING_SNAKE_CASE__ )}"""
raise ValueError(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE : Any = input_str.split('''_''' )
SCREAMING_SNAKE_CASE : Optional[Any] = 0 if use_pascal else 1
SCREAMING_SNAKE_CASE : Optional[Any] = words[start_index:]
SCREAMING_SNAKE_CASE : Union[str, Any] = [word[0].upper() + word[1:] for word in words_to_capitalize]
SCREAMING_SNAKE_CASE : str = '''''' if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 367 | from __future__ import annotations
import numpy as np
def A ( _lowercase ):
return np.maximum(0 , _lowercase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 258 | 0 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase__ ( lowerCAmelCase , unittest.TestCase):
SCREAMING_SNAKE_CASE__ = LxmertTokenizer
SCREAMING_SNAKE_CASE__ = LxmertTokenizerFast
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
def __A (self ) -> Any:
super().setUp()
_lowercase =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_lowercase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __A (self , UpperCAmelCase ) -> str:
_lowercase ='''UNwant\u00E9d,running'''
_lowercase ='''unwanted, running'''
return input_text, output_text
def __A (self ) -> Tuple:
_lowercase =self.tokenizer_class(self.vocab_file )
_lowercase =tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(UpperCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [7, 4, 5, 1_0, 8, 9] )
def __A (self ) -> int:
if not self.test_rust_tokenizer:
return
_lowercase =self.get_tokenizer()
_lowercase =self.get_rust_tokenizer()
_lowercase ='''I was born in 92000, and this is falsé.'''
_lowercase =tokenizer.tokenize(UpperCAmelCase )
_lowercase =rust_tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
_lowercase =tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
_lowercase =rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
_lowercase =self.get_rust_tokenizer()
_lowercase =tokenizer.encode(UpperCAmelCase )
_lowercase =rust_tokenizer.encode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
| 5 |
from typing import Any
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> list:
"""simple docstring"""
_validation(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
# Creates data structures and fill initial step
_lowercase ={}
_lowercase ={}
for state in states_space:
_lowercase =observations_space[0]
_lowercase =(
initial_probabilities[state] * emission_probabilities[state][observation]
)
_lowercase =None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(__snake_case ) ):
_lowercase =observations_space[o]
_lowercase =observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
_lowercase =''''''
_lowercase =-1
for k_state in states_space:
_lowercase =(
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
_lowercase =probability
_lowercase =k_state
# Update probabilities and pointers dicts
_lowercase =(
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
_lowercase =arg_max
# The final observation
_lowercase =observations_space[len(__snake_case ) - 1]
# argmax for given final observation
_lowercase =''''''
_lowercase =-1
for k_state in states_space:
_lowercase =probabilities[(k_state, final_observation)]
if probability > max_probability:
_lowercase =probability
_lowercase =k_state
_lowercase =arg_max
# Process pointers backwards
_lowercase =last_state
_lowercase =[]
for o in range(len(__snake_case ) - 1 , -1 , -1 ):
result.append(__snake_case )
_lowercase =pointers[previous, observations_space[o]]
result.reverse()
return result
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> None:
"""simple docstring"""
_validate_not_empty(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
_validate_lists(__snake_case , __snake_case )
_validate_dicts(
__snake_case , __snake_case , __snake_case )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> None:
"""simple docstring"""
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> None:
"""simple docstring"""
_validate_list(__snake_case , '''observations_space''' )
_validate_list(__snake_case , '''states_space''' )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> None:
"""simple docstring"""
if not isinstance(_object , __snake_case ):
_lowercase =F"{var_name} must be a list"
raise ValueError(__snake_case )
else:
for x in _object:
if not isinstance(__snake_case , __snake_case ):
_lowercase =F"{var_name} must be a list of strings"
raise ValueError(__snake_case )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , ) -> None:
"""simple docstring"""
_validate_dict(__snake_case , '''initial_probabilities''' , __snake_case )
_validate_nested_dict(__snake_case , '''transition_probabilities''' )
_validate_nested_dict(__snake_case , '''emission_probabilities''' )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> None:
"""simple docstring"""
_validate_dict(_object , __snake_case , __snake_case )
for x in _object.values():
_validate_dict(__snake_case , __snake_case , __snake_case , __snake_case )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case = False ) -> None:
"""simple docstring"""
if not isinstance(_object , __snake_case ):
_lowercase =F"{var_name} must be a dict"
raise ValueError(__snake_case )
if not all(isinstance(__snake_case , __snake_case ) for x in _object ):
_lowercase =F"{var_name} all keys must be strings"
raise ValueError(__snake_case )
if not all(isinstance(__snake_case , __snake_case ) for x in _object.values() ):
_lowercase ='''nested dictionary ''' if nested else ''''''
_lowercase =F"{var_name} {nested_text}all values must be {value_type.__name__}"
raise ValueError(__snake_case )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 5 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase : Optional[int] = logging.getLogger(__name__)
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
__magic_name__ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__magic_name__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__magic_name__ = field(
default="NER" , metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} )
__magic_name__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__magic_name__ = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__magic_name__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
__magic_name__ = field(
metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} )
__magic_name__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} , )
__magic_name__ = field(
default=1_2_8 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__magic_name__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
_lowerCAmelCase : Tuple = import_module('tasks' )
try:
_lowerCAmelCase : Dict = getattr(_A , model_args.task_type )
_lowerCAmelCase : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
f'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _A )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
_lowerCAmelCase : str = token_classification_task.get_labels(data_args.labels )
_lowerCAmelCase : Dict[int, str] = dict(enumerate(_A ) )
_lowerCAmelCase : Optional[int] = len(_A )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_A , idalabel=_A , labelaid={label: i for i, label in enumerate(_A )} , cache_dir=model_args.cache_dir , )
_lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
_lowerCAmelCase : List[Any] = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_A , cache_dir=model_args.cache_dir , )
# Get datasets
_lowerCAmelCase : Optional[int] = (
TokenClassificationDataset(
token_classification_task=_A , data_dir=data_args.data_dir , tokenizer=_A , labels=_A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
_lowerCAmelCase : Optional[int] = (
TokenClassificationDataset(
token_classification_task=_A , data_dir=data_args.data_dir , tokenizer=_A , labels=_A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(_A , _A ) -> Tuple[List[int], List[int]]:
_lowerCAmelCase : Any = np.argmax(_A , axis=2 )
_lowerCAmelCase , _lowerCAmelCase : Tuple = preds.shape
_lowerCAmelCase : Any = [[] for _ in range(_A )]
_lowerCAmelCase : List[Any] = [[] for _ in range(_A )]
for i in range(_A ):
for j in range(_A ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(_A ) -> Dict:
_lowerCAmelCase , _lowerCAmelCase : List[Any] = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(_A , _A ),
"precision": precision_score(_A , _A ),
"recall": recall_score(_A , _A ),
"f1": fa_score(_A , _A ),
}
# Data collator
_lowerCAmelCase : Dict = DataCollatorWithPadding(_A , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
_lowerCAmelCase : Dict = Trainer(
model=_A , args=_A , train_dataset=_A , eval_dataset=_A , compute_metrics=_A , data_collator=_A , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_lowerCAmelCase : Dict = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_lowerCAmelCase : Optional[Any] = trainer.evaluate()
_lowerCAmelCase : Optional[int] = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_process_zero():
with open(_A , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , _A , _A )
writer.write('%s = %s\n' % (key, value) )
results.update(_A )
# Predict
if training_args.do_predict:
_lowerCAmelCase : str = TokenClassificationDataset(
token_classification_task=_A , data_dir=data_args.data_dir , tokenizer=_A , labels=_A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = trainer.predict(_A )
_lowerCAmelCase , _lowerCAmelCase : Tuple = align_predictions(_A , _A )
_lowerCAmelCase : Tuple = os.path.join(training_args.output_dir , 'test_results.txt' )
if trainer.is_world_process_zero():
with open(_A , 'w' ) as writer:
for key, value in metrics.items():
logger.info(' %s = %s' , _A , _A )
writer.write('%s = %s\n' % (key, value) )
# Save predictions
_lowerCAmelCase : Optional[Any] = os.path.join(training_args.output_dir , 'test_predictions.txt' )
if trainer.is_world_process_zero():
with open(_A , 'w' ) as writer:
with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
token_classification_task.write_predictions_to_file(_A , _A , _A )
return results
def lowercase (_A ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 25 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "trajectory_transformer"
__magic_name__ = ["past_key_values"]
__magic_name__ = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , snake_case__=100 , snake_case__=5 , snake_case__=1 , snake_case__=1 , snake_case__=249 , snake_case__=6 , snake_case__=17 , snake_case__=25 , snake_case__=4 , snake_case__=4 , snake_case__=128 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0006 , snake_case__=512 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=1 , snake_case__=True , snake_case__=1 , snake_case__=5_0256 , snake_case__=5_0256 , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : Any = action_weight
_lowerCAmelCase : Optional[int] = reward_weight
_lowerCAmelCase : Union[str, Any] = value_weight
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : Tuple = block_size
_lowerCAmelCase : List[Any] = action_dim
_lowerCAmelCase : List[Any] = observation_dim
_lowerCAmelCase : Union[str, Any] = transition_dim
_lowerCAmelCase : Tuple = learning_rate
_lowerCAmelCase : int = n_layer
_lowerCAmelCase : Any = n_head
_lowerCAmelCase : Tuple = n_embd
_lowerCAmelCase : Optional[Any] = embd_pdrop
_lowerCAmelCase : Union[str, Any] = attn_pdrop
_lowerCAmelCase : Any = resid_pdrop
_lowerCAmelCase : Optional[Any] = initializer_range
_lowerCAmelCase : List[Any] = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = kaiming_initializer_range
_lowerCAmelCase : List[Any] = use_cache
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
| 25 | 1 |
"""simple docstring"""
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class snake_case ( _lowerCAmelCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
__A = SMALL_MODEL_IDENTIFIER
__A = '''pt'''
__A = '''tf'''
def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : int ):
'''simple docstring'''
__A = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__A = TFAutoModel.from_pretrained(self.test_model, from_pt=_lowerCamelCase )
model_tf.save_pretrained(_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
'''simple docstring'''
__A = '''mock_framework'''
# Framework provided - return whatever the user provides
__A = FeaturesManager.determine_framework(self.test_model, _lowerCamelCase )
self.assertEqual(_lowerCamelCase, _lowerCamelCase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowerCamelCase )
__A = FeaturesManager.determine_framework(_lowerCamelCase, _lowerCamelCase )
self.assertEqual(_lowerCamelCase, _lowerCamelCase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowerCamelCase )
__A = FeaturesManager.determine_framework(_lowerCamelCase, _lowerCamelCase )
self.assertEqual(_lowerCamelCase, _lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
'''simple docstring'''
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowerCamelCase )
__A = FeaturesManager.determine_framework(_lowerCamelCase )
self.assertEqual(_lowerCamelCase, self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowerCamelCase )
__A = FeaturesManager.determine_framework(_lowerCamelCase )
self.assertEqual(_lowerCamelCase, self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(_lowerCamelCase ):
__A = FeaturesManager.determine_framework(_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
__A = MagicMock(return_value=_lowerCamelCase )
with patch('''transformers.onnx.features.is_tf_available''', _lowerCamelCase ):
__A = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowerCamelCase, self.framework_pt )
# PyTorch not in environment -> use TensorFlow
__A = MagicMock(return_value=_lowerCamelCase )
with patch('''transformers.onnx.features.is_torch_available''', _lowerCamelCase ):
__A = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowerCamelCase, self.framework_tf )
# Both in environment -> use PyTorch
__A = MagicMock(return_value=_lowerCamelCase )
__A = MagicMock(return_value=_lowerCamelCase )
with patch('''transformers.onnx.features.is_tf_available''', _lowerCamelCase ), patch(
'''transformers.onnx.features.is_torch_available''', _lowerCamelCase ):
__A = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowerCamelCase, self.framework_pt )
# Both not in environment -> raise error
__A = MagicMock(return_value=_lowerCamelCase )
__A = MagicMock(return_value=_lowerCamelCase )
with patch('''transformers.onnx.features.is_tf_available''', _lowerCamelCase ), patch(
'''transformers.onnx.features.is_torch_available''', _lowerCamelCase ):
with self.assertRaises(_lowerCamelCase ):
__A = FeaturesManager.determine_framework(self.test_model )
| 266 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[Any], _lowerCamelCase : int, _lowerCamelCase : List[Any]=7, _lowerCamelCase : int=3, _lowerCamelCase : Optional[Any]=18, _lowerCamelCase : Any=30, _lowerCamelCase : str=4_00, _lowerCamelCase : int=True, _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : str=True, ):
'''simple docstring'''
__A = size if size is not None else {'''height''': 18, '''width''': 18}
__A = parent
__A = batch_size
__A = num_channels
__A = image_size
__A = min_resolution
__A = max_resolution
__A = do_resize
__A = size
__A = apply_ocr
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class snake_case ( _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[int] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _SCREAMING_SNAKE_CASE ( self : Dict ):
'''simple docstring'''
__A = LayoutLMvaImageProcessingTester(self )
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : int ):
'''simple docstring'''
__A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase, '''do_resize''' ) )
self.assertTrue(hasattr(_lowerCamelCase, '''size''' ) )
self.assertTrue(hasattr(_lowerCamelCase, '''apply_ocr''' ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
__A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''height''': 18, '''width''': 18} )
__A = self.image_processing_class.from_dict(self.image_processor_dict, size=42 )
self.assertEqual(image_processor.size, {'''height''': 42, '''width''': 42} )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase, Image.Image )
# Test not batched input
__A = image_processing(image_inputs[0], return_tensors='''pt''' )
self.assertEqual(
encoding.pixel_values.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
self.assertIsInstance(encoding.words, _lowerCamelCase )
self.assertIsInstance(encoding.boxes, _lowerCamelCase )
# Test batched
__A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
def _SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase, np.ndarray )
# Test not batched input
__A = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
# Test batched
__A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase, torch.Tensor )
# Test not batched input
__A = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
# Test batched
__A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
# with apply_OCR = True
__A = LayoutLMvaImageProcessor()
from datasets import load_dataset
__A = load_dataset('''hf-internal-testing/fixtures_docvqa''', split='''test''' )
__A = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
__A = image_processing(_lowerCamelCase, return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ), len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__A = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
__A = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words, _lowerCamelCase )
self.assertListEqual(encoding.boxes, _lowerCamelCase )
# with apply_OCR = False
__A = LayoutLMvaImageProcessor(apply_ocr=_lowerCamelCase )
__A = image_processing(_lowerCamelCase, return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) )
| 266 | 1 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _a ( unittest.TestCase , _lowerCAmelCase ):
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: Optional[int] = load_tool("""text-classification""" )
self.tool.setup()
UpperCAmelCase_: str = load_tool("""text-classification""", remote=SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_: Any = self.tool("""That's quite cool""", ["""positive""", """negative"""] )
self.assertEqual(SCREAMING_SNAKE_CASE_, """positive""" )
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: List[str] = self.remote_tool("""That's quite cool""", ["""positive""", """negative"""] )
self.assertEqual(SCREAMING_SNAKE_CASE_, """positive""" )
def __snake_case (self ) -> Any:
UpperCAmelCase_: Tuple = self.tool(text="""That's quite cool""", labels=["""positive""", """negative"""] )
self.assertEqual(SCREAMING_SNAKE_CASE_, """positive""" )
def __snake_case (self ) -> int:
UpperCAmelCase_: Dict = self.remote_tool(text="""That's quite cool""", labels=["""positive""", """negative"""] )
self.assertEqual(SCREAMING_SNAKE_CASE_, """positive""" )
| 82 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
a : List[str] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a : str = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
a : Dict = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
a : Optional[Any] = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class _a ( _lowerCAmelCase ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_INIT_CONFIGURATION
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ElectraTokenizer
def __init__(self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_="[UNK]", SCREAMING_SNAKE_CASE_="[SEP]", SCREAMING_SNAKE_CASE_="[PAD]", SCREAMING_SNAKE_CASE_="[CLS]", SCREAMING_SNAKE_CASE_="[MASK]", SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_, ) -> Optional[int]:
super().__init__(
SCREAMING_SNAKE_CASE_, tokenizer_file=SCREAMING_SNAKE_CASE_, do_lower_case=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, mask_token=SCREAMING_SNAKE_CASE_, tokenize_chinese_chars=SCREAMING_SNAKE_CASE_, strip_accents=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""", SCREAMING_SNAKE_CASE_ ) != do_lower_case
or normalizer_state.get("""strip_accents""", SCREAMING_SNAKE_CASE_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""", SCREAMING_SNAKE_CASE_ ) != tokenize_chinese_chars
):
UpperCAmelCase_: Optional[int] = getattr(SCREAMING_SNAKE_CASE_, normalizer_state.pop("""type""" ) )
UpperCAmelCase_: Union[str, Any] = do_lower_case
UpperCAmelCase_: Dict = strip_accents
UpperCAmelCase_: List[Any] = tokenize_chinese_chars
UpperCAmelCase_: int = normalizer_class(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = do_lower_case
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None ) -> Optional[Any]:
UpperCAmelCase_: Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCAmelCase_: Optional[int] = [self.sep_token_id]
UpperCAmelCase_: Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
UpperCAmelCase_: Tuple = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_, name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
| 82 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def UpperCamelCase ( _lowerCamelCase : list[Any] ):
create_state_space_tree(_lowerCamelCase , [] , 0 )
def UpperCamelCase ( _lowerCamelCase : list[Any] , _lowerCamelCase : list[Any] , _lowerCamelCase : int ):
if index == len(_lowerCamelCase ):
print(_lowerCamelCase )
return
create_state_space_tree(_lowerCamelCase , _lowerCamelCase , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(_lowerCamelCase , _lowerCamelCase , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
__lowerCAmelCase : list[Any] =[3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
| 237 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__lowerCAmelCase : Optional[int] =16
__lowerCAmelCase : Tuple =32
def UpperCamelCase ( _lowerCamelCase : Accelerator , _lowerCamelCase : DatasetDict , _lowerCamelCase : List[int] , _lowerCamelCase : List[int] , _lowerCamelCase : int = 16 ):
A__ = AutoTokenizer.from_pretrained("bert-base-cased" )
A__ = DatasetDict(
{
"train": dataset["train"].select(_lowerCamelCase ),
"validation": dataset["train"].select(_lowerCamelCase ),
"test": dataset["validation"],
} )
def tokenize_function(_lowerCamelCase : Dict ):
# max_length=None => use the model max length (it's actually the default)
A__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCamelCase , max_length=_lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A__ = datasets.map(
_lowerCamelCase , batched=_lowerCamelCase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_lowerCamelCase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A__ = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A__ = 16
elif accelerator.mixed_precision != "no":
A__ = 8
else:
A__ = None
return tokenizer.pad(
_lowerCamelCase , padding="longest" , max_length=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_tensors="pt" , )
# Instantiate dataloaders.
A__ = DataLoader(
tokenized_datasets["train"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase )
A__ = DataLoader(
tokenized_datasets["validation"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase )
A__ = DataLoader(
tokenized_datasets["test"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase )
return train_dataloader, eval_dataloader, test_dataloader
def UpperCamelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : str ):
# New Code #
A__ = []
# Download the dataset
A__ = load_dataset("glue" , "mrpc" )
# Create our splits
A__ = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
A__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ = config["lr"]
A__ = int(config["num_epochs"] )
A__ = int(config["seed"] )
A__ = int(config["batch_size"] )
A__ = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
A__ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A__ = batch_size // MAX_GPU_BATCH_SIZE
A__ = MAX_GPU_BATCH_SIZE
set_seed(_lowerCamelCase )
# New Code #
# Create our folds:
A__ = kfold.split(np.zeros(datasets["train"].num_rows ) , datasets["train"]["label"] )
A__ = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(_lowerCamelCase ):
A__, A__, A__ = get_fold_dataloaders(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ = model.to(accelerator.device )
# Instantiate optimizer
A__ = AdamW(params=model.parameters() , lr=_lowerCamelCase )
# Instantiate scheduler
A__ = get_linear_schedule_with_warmup(
optimizer=_lowerCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(_lowerCamelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__, A__, A__, A__, A__ = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Now we train the model
for epoch in range(_lowerCamelCase ):
model.train()
for step, batch in enumerate(_lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
A__ = model(**_lowerCamelCase )
A__ = outputs.loss
A__ = loss / gradient_accumulation_steps
accelerator.backward(_lowerCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A__ = model(**_lowerCamelCase )
A__ = outputs.logits.argmax(dim=-1 )
A__, A__ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=_lowerCamelCase , references=_lowerCamelCase , )
A__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , _lowerCamelCase )
# New Code #
# We also run predictions on the test set at the very end
A__ = []
for step, batch in enumerate(_lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A__ = model(**_lowerCamelCase )
A__ = outputs.logits
A__, A__ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(_lowerCamelCase , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
A__ = torch.cat(_lowerCamelCase , dim=0 )
A__ = torch.stack(_lowerCamelCase , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
A__ = metric.compute(predictions=_lowerCamelCase , references=_lowerCamelCase )
accelerator.print("Average test metrics from all folds:" , _lowerCamelCase )
def UpperCamelCase ( ):
A__ = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=_lowerCamelCase , default=_lowerCamelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
# New Code #
parser.add_argument("--num_folds" , type=_lowerCamelCase , default=3 , help="The number of splits to perform across the dataset" )
A__ = parser.parse_args()
A__ = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(_lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
main()
| 237 | 1 |
"""simple docstring"""
def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : list[list[int]] ) -> int:
'''simple docstring'''
def update_area_of_max_square(_UpperCamelCase : int , _UpperCamelCase : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
__UpperCAmelCase : Optional[int] = update_area_of_max_square(_UpperCamelCase , col + 1 )
__UpperCAmelCase : int = update_area_of_max_square(row + 1 , col + 1 )
__UpperCAmelCase : Optional[Any] = update_area_of_max_square(row + 1 , _UpperCamelCase )
if mat[row][col]:
__UpperCAmelCase : List[Any] = 1 + min([right, diagonal, down] )
__UpperCAmelCase : Any = max(largest_square_area[0] , _UpperCamelCase )
return sub_problem_sol
else:
return 0
__UpperCAmelCase : Optional[int] = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : list[list[int]] ) -> int:
'''simple docstring'''
def update_area_of_max_square_using_dp_array(
_UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
__UpperCAmelCase : List[str] = update_area_of_max_square_using_dp_array(_UpperCamelCase , col + 1 , _UpperCamelCase )
__UpperCAmelCase : Optional[Any] = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , _UpperCamelCase )
__UpperCAmelCase : str = update_area_of_max_square_using_dp_array(row + 1 , _UpperCamelCase , _UpperCamelCase )
if mat[row][col]:
__UpperCAmelCase : List[Any] = 1 + min([right, diagonal, down] )
__UpperCAmelCase : Optional[Any] = max(largest_square_area[0] , _UpperCamelCase )
__UpperCAmelCase : Tuple = sub_problem_sol
return sub_problem_sol
else:
return 0
__UpperCAmelCase : Dict = [0]
__UpperCAmelCase : Any = [[-1] * cols for _ in range(_UpperCamelCase )]
update_area_of_max_square_using_dp_array(0 , 0 , _UpperCamelCase )
return largest_square_area[0]
def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : list[list[int]] ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[Any] = [[0] * (cols + 1) for _ in range(rows + 1 )]
__UpperCAmelCase : Any = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__UpperCAmelCase : Union[str, Any] = dp_array[row][col + 1]
__UpperCAmelCase : Optional[int] = dp_array[row + 1][col + 1]
__UpperCAmelCase : Optional[Any] = dp_array[row + 1][col]
if mat[row][col] == 1:
__UpperCAmelCase : Optional[int] = 1 + min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
__UpperCAmelCase : Optional[Any] = max(dp_array[row][col] , _UpperCamelCase )
else:
__UpperCAmelCase : int = 0
return largest_square_area
def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : list[list[int]] ) -> int:
'''simple docstring'''
__UpperCAmelCase : int = [0] * (cols + 1)
__UpperCAmelCase : Tuple = [0] * (cols + 1)
__UpperCAmelCase : int = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__UpperCAmelCase : int = current_row[col + 1]
__UpperCAmelCase : List[Any] = next_row[col + 1]
__UpperCAmelCase : str = next_row[col]
if mat[row][col] == 1:
__UpperCAmelCase : Any = 1 + min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = max(current_row[col] , _UpperCamelCase )
else:
__UpperCAmelCase : Tuple = 0
__UpperCAmelCase : Any = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 320 |
"""simple docstring"""
UpperCAmelCase : Dict = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def lowerCamelCase ( _UpperCamelCase : bytes ) -> bytes:
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
__UpperCAmelCase : Any = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(_UpperCamelCase )
__UpperCAmelCase : str = """""".join(bin(_UpperCamelCase )[2:].zfill(8 ) for byte in data )
__UpperCAmelCase : int = len(_UpperCamelCase ) % 6 != 0
if padding_needed:
# The padding that will be added later
__UpperCAmelCase : Dict = b"""=""" * ((6 - len(_UpperCamelCase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(_UpperCamelCase ) % 6)
else:
__UpperCAmelCase : List[str] = b""""""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(_UpperCamelCase ) , 6 ) ).encode()
+ padding
)
def lowerCamelCase ( _UpperCamelCase : str ) -> bytes:
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ) and not isinstance(_UpperCamelCase , _UpperCamelCase ):
__UpperCAmelCase : Tuple = (
"""argument should be a bytes-like object or ASCII string, """
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(_UpperCamelCase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_UpperCamelCase , _UpperCamelCase ):
try:
__UpperCAmelCase : Optional[Any] = encoded_data.decode("""utf-8""" )
except UnicodeDecodeError:
raise ValueError("""base64 encoded data should only contain ASCII characters""" )
__UpperCAmelCase : str = encoded_data.count("""=""" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_UpperCamelCase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
__UpperCAmelCase : List[str] = encoded_data[:-padding]
__UpperCAmelCase : int = """""".join(
bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
__UpperCAmelCase : Optional[Any] = """""".join(
bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )
__UpperCAmelCase : List[Any] = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(_UpperCamelCase ) , 8 )
]
return bytes(_UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 320 | 1 |
import math
def UpperCamelCase__ ( A__ ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(A__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def UpperCamelCase__ ( A__ = 1_0001 ) -> int:
try:
snake_case__ : Tuple = int(A__ )
except (TypeError, ValueError):
raise TypeError('Parameter nth must be int or castable to int.' ) from None
if nth <= 0:
raise ValueError('Parameter nth must be greater than or equal to one.' )
snake_case__ : list[int] = []
snake_case__ : int = 2
while len(A__ ) < nth:
if is_prime(A__ ):
primes.append(A__ )
num += 1
else:
num += 1
return primes[len(A__ ) - 1]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 143 | from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __snake_case ( _lowerCamelCase ):
@staticmethod
@abstractmethod
def __a ( __UpperCamelCase ) -> Dict:
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def __a ( self ) -> Optional[int]:
'''simple docstring'''
raise NotImplementedError()
| 143 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class UpperCAmelCase_ ( _lowerCamelCase ):
'''simple docstring'''
_lowercase : Tuple = '''roc_bert'''
def __init__( self , _lowercase=30_522 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3_072 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=2 , _lowercase=0.02 , _lowercase=1e-12 , _lowercase=True , _lowercase=0 , _lowercase="absolute" , _lowercase=None , _lowercase=True , _lowercase=True , _lowercase=768 , _lowercase=910 , _lowercase=512 , _lowercase=24_858 , _lowercase=True , **_lowercase , ):
"""simple docstring"""
_lowerCAmelCase = vocab_size
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = initializer_range
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = use_cache
_lowerCAmelCase = enable_pronunciation
_lowerCAmelCase = enable_shape
_lowerCAmelCase = pronunciation_embed_dim
_lowerCAmelCase = pronunciation_vocab_size
_lowerCAmelCase = shape_embed_dim
_lowerCAmelCase = shape_vocab_size
_lowerCAmelCase = concat_input
_lowerCAmelCase = position_embedding_type
_lowerCAmelCase = classifier_dropout
super().__init__(pad_token_id=_lowercase , **_lowercase )
| 353 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowercase = {"""configuration_deit""": ["""DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DeiTConfig""", """DeiTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ["""DeiTFeatureExtractor"""]
_lowercase = ["""DeiTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 229 | 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A_ : List[str] = logging.get_logger(__name__)
A_ : str = '▁'
A_ : int = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
A_ : str = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
A_ : Optional[Any] = {
'facebook/s2t-small-librispeech-asr': 1024,
}
A_ : List[Any] = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
A_ : str = {'mustc': MUSTC_LANGS}
class A_ ( _a ):
'''simple docstring'''
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = MAX_MODEL_INPUT_SIZES
a__ = ["input_ids", "attention_mask"]
a__ = []
def __init__(self , lowercase__ , lowercase__ , lowercase__="<s>" , lowercase__="</s>" , lowercase__="<pad>" , lowercase__="<unk>" , lowercase__=False , lowercase__=False , lowercase__=None , lowercase__=None , lowercase__ = None , **lowercase__ , ) -> None:
__UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowercase__ , eos_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , do_upper_case=lowercase__ , do_lower_case=lowercase__ , tgt_lang=lowercase__ , lang_codes=lowercase__ , sp_model_kwargs=self.sp_model_kwargs , **lowercase__ , )
__UpperCAmelCase = do_upper_case
__UpperCAmelCase = do_lower_case
__UpperCAmelCase = load_json(lowercase__ )
__UpperCAmelCase = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase = spm_file
__UpperCAmelCase = load_spm(lowercase__ , self.sp_model_kwargs )
if lang_codes is not None:
__UpperCAmelCase = lang_codes
__UpperCAmelCase = LANGUAGES[lang_codes]
__UpperCAmelCase = [F'''<lang:{lang}>''' for lang in self.langs]
__UpperCAmelCase = {lang: self.sp_model.PieceToId(F'''<lang:{lang}>''' ) for lang in self.langs}
__UpperCAmelCase = self.lang_tokens
__UpperCAmelCase = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__UpperCAmelCase = {}
@property
def lowerCAmelCase_ (self ) -> int:
return len(self.encoder )
@property
def lowerCAmelCase_ (self ) -> str:
return self._tgt_lang
@tgt_lang.setter
def lowerCAmelCase_ (self , lowercase__ ) -> None:
__UpperCAmelCase = new_tgt_lang
self.set_tgt_lang_special_tokens(lowercase__ )
def lowerCAmelCase_ (self , lowercase__ ) -> None:
__UpperCAmelCase = self.lang_code_to_id[tgt_lang]
__UpperCAmelCase = [lang_code_id]
def lowerCAmelCase_ (self , lowercase__ ) -> List[str]:
return self.sp_model.encode(lowercase__ , out_type=lowercase__ )
def lowerCAmelCase_ (self , lowercase__ ) -> Union[str, Any]:
return self.encoder.get(lowercase__ , self.encoder[self.unk_token] )
def lowerCAmelCase_ (self , lowercase__ ) -> str:
return self.decoder.get(lowercase__ , self.unk_token )
def lowerCAmelCase_ (self , lowercase__ ) -> str:
__UpperCAmelCase = []
__UpperCAmelCase = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__UpperCAmelCase = self.sp_model.decode(lowercase__ )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__UpperCAmelCase = []
else:
current_sub_tokens.append(lowercase__ )
__UpperCAmelCase = self.sp_model.decode(lowercase__ )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def lowerCAmelCase_ (self , lowercase__ , lowercase__=None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None , lowercase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase__ , token_ids_a=lowercase__ , already_has_special_tokens=lowercase__ )
__UpperCAmelCase = [1] * len(self.prefix_tokens )
__UpperCAmelCase = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(lowercase__ )) + suffix_ones
return prefix_ones + ([0] * len(lowercase__ )) + ([0] * len(lowercase__ )) + suffix_ones
def lowerCAmelCase_ (self ) -> Dict:
__UpperCAmelCase = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self ) -> Dict:
__UpperCAmelCase = self.__dict__.copy()
__UpperCAmelCase = None
return state
def __setstate__(self , lowercase__ ) -> None:
__UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__UpperCAmelCase = {}
__UpperCAmelCase = load_spm(self.spm_file , self.sp_model_kwargs )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None ) -> Tuple[str]:
__UpperCAmelCase = Path(lowercase__ )
assert save_dir.is_dir(), F'''{save_directory} should be a directory'''
__UpperCAmelCase = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
__UpperCAmelCase = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , lowercase__ )
if os.path.abspath(self.spm_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , lowercase__ )
elif not os.path.isfile(self.spm_file ):
with open(lowercase__ , '''wb''' ) as fi:
__UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(lowercase__ )
return (str(lowercase__ ), str(lowercase__ ))
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> sentencepiece.SentencePieceProcessor:
'''simple docstring'''
__UpperCAmelCase = sentencepiece.SentencePieceProcessor(**SCREAMING_SNAKE_CASE )
spm.Load(str(SCREAMING_SNAKE_CASE ) )
return spm
def __a ( SCREAMING_SNAKE_CASE ) -> Union[Dict, List]:
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE , '''r''' ) as f:
return json.load(SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE , '''w''' ) as f:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , indent=2 )
| 333 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A_ : Tuple = logging.get_logger(__name__)
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
__UpperCAmelCase = b.T
__UpperCAmelCase = np.sum(np.square(SCREAMING_SNAKE_CASE ) , axis=1 )
__UpperCAmelCase = np.sum(np.square(SCREAMING_SNAKE_CASE ) , axis=0 )
__UpperCAmelCase = np.matmul(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__UpperCAmelCase = aa[:, None] - 2 * ab + ba[None, :]
return d
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = x.reshape(-1 , 3 )
__UpperCAmelCase = squared_euclidean_distance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return np.argmin(SCREAMING_SNAKE_CASE , axis=1 )
class A_ ( _a ):
'''simple docstring'''
a__ = ["pixel_values"]
def __init__(self , lowercase__ = None , lowercase__ = True , lowercase__ = None , lowercase__ = PILImageResampling.BILINEAR , lowercase__ = True , lowercase__ = True , **lowercase__ , ) -> None:
super().__init__(**lowercase__ )
__UpperCAmelCase = size if size is not None else {'''height''': 256, '''width''': 256}
__UpperCAmelCase = get_size_dict(lowercase__ )
__UpperCAmelCase = np.array(lowercase__ ) if clusters is not None else None
__UpperCAmelCase = do_resize
__UpperCAmelCase = size
__UpperCAmelCase = resample
__UpperCAmelCase = do_normalize
__UpperCAmelCase = do_color_quantize
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ = PILImageResampling.BILINEAR , lowercase__ = None , **lowercase__ , ) -> np.ndarray:
__UpperCAmelCase = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
lowercase__ , size=(size['''height'''], size['''width''']) , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None , ) -> np.ndarray:
__UpperCAmelCase = rescale(image=lowercase__ , scale=1 / 127.5 , data_format=lowercase__ )
__UpperCAmelCase = image - 1
return image
def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ) -> PIL.Image.Image:
__UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
__UpperCAmelCase = size if size is not None else self.size
__UpperCAmelCase = get_size_dict(lowercase__ )
__UpperCAmelCase = resample if resample is not None else self.resample
__UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
__UpperCAmelCase = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
__UpperCAmelCase = clusters if clusters is not None else self.clusters
__UpperCAmelCase = np.array(lowercase__ )
__UpperCAmelCase = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_color_quantize and clusters is None:
raise ValueError('''Clusters must be specified if do_color_quantize is True.''' )
# All transformations expect numpy arrays.
__UpperCAmelCase = [to_numpy_array(lowercase__ ) for image in images]
if do_resize:
__UpperCAmelCase = [self.resize(image=lowercase__ , size=lowercase__ , resample=lowercase__ ) for image in images]
if do_normalize:
__UpperCAmelCase = [self.normalize(image=lowercase__ ) for image in images]
if do_color_quantize:
__UpperCAmelCase = [to_channel_dimension_format(lowercase__ , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
__UpperCAmelCase = np.array(lowercase__ )
__UpperCAmelCase = color_quantize(lowercase__ , lowercase__ ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
__UpperCAmelCase = images.shape[0]
__UpperCAmelCase = images.reshape(lowercase__ , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
__UpperCAmelCase = list(lowercase__ )
else:
__UpperCAmelCase = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
__UpperCAmelCase = {'''input_ids''': images}
return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
| 333 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class UpperCamelCase ( lowercase_ ):
lowercase = 42
lowercase = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 321 | """simple docstring"""
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> int:
'''simple docstring'''
lowercase_ : List[Any] = name
lowercase_ : int = val
def __str__( self ) -> Tuple:
'''simple docstring'''
return f'''{self.__class__.__name__}({self.name}, {self.val})'''
def __lt__( self ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
return self.val < other.val
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
lowercase_ : Optional[int] = {}
lowercase_ : Tuple = {}
lowercase_ : Union[str, Any] = self.build_heap(__UpperCamelCase )
def __getitem__( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
return self.get_value(__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
'''simple docstring'''
return (idx - 1) // 2
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
return idx * 2 + 1
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
return idx * 2 + 2
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
return self.heap_dict[key]
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
'''simple docstring'''
lowercase_ : Optional[int] = len(__UpperCamelCase ) - 1
lowercase_ : Optional[int] = self.get_parent_idx(__UpperCamelCase )
for idx, i in enumerate(__UpperCamelCase ):
lowercase_ : Any = idx
lowercase_ : str = i.val
for i in range(__UpperCamelCase ,-1 ,-1 ):
self.sift_down(__UpperCamelCase ,__UpperCamelCase )
return array
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
while True:
lowercase_ : List[str] = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741
lowercase_ : List[str] = self.get_right_child_idx(__UpperCamelCase )
lowercase_ : List[str] = idx
if l < len(__UpperCamelCase ) and array[l] < array[idx]:
lowercase_ : List[str] = l
if r < len(__UpperCamelCase ) and array[r] < array[smallest]:
lowercase_ : Dict = r
if smallest != idx:
lowercase_ , lowercase_ : Union[str, Any] = array[smallest], array[idx]
(
(
lowercase_
) , (
lowercase_
) ,
) : str = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
lowercase_ : Any = smallest
else:
break
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
lowercase_ : Dict = self.get_parent_idx(__UpperCamelCase )
while p >= 0 and self.heap[p] > self.heap[idx]:
lowercase_ , lowercase_ : Any = self.heap[idx], self.heap[p]
lowercase_ , lowercase_ : Tuple = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
lowercase_ : int = p
lowercase_ : str = self.get_parent_idx(__UpperCamelCase )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
return self.heap[0]
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ , lowercase_ : Optional[Any] = self.heap[-1], self.heap[0]
lowercase_ , lowercase_ : Tuple = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
lowercase_ : Tuple = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 ,self.heap )
return x
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
self.heap.append(__UpperCamelCase )
lowercase_ : Tuple = len(self.heap ) - 1
lowercase_ : Optional[int] = node.val
self.sift_up(len(self.heap ) - 1 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return len(self.heap ) == 0
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
lowercase_ : Any = new_value
lowercase_ : List[str] = new_value
self.sift_up(self.idx_of_element[node] )
__SCREAMING_SNAKE_CASE =Node("R", -1)
__SCREAMING_SNAKE_CASE =Node("B", 6)
__SCREAMING_SNAKE_CASE =Node("A", 3)
__SCREAMING_SNAKE_CASE =Node("X", 1)
__SCREAMING_SNAKE_CASE =Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
__SCREAMING_SNAKE_CASE =MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 321 | 1 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any]=7 , SCREAMING_SNAKE_CASE_ : Optional[int]=3 , SCREAMING_SNAKE_CASE_ : Any=1_8 , SCREAMING_SNAKE_CASE_ : Tuple=3_0 , SCREAMING_SNAKE_CASE_ : int=4_0_0 , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE_ : List[str]=[0.5, 0.5, 0.5] , ) -> Optional[Any]:
lowercase_ = size if size is not None else {'''height''': 1_8, '''width''': 1_8}
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = num_channels
lowercase_ = image_size
lowercase_ = min_resolution
lowercase_ = max_resolution
lowercase_ = do_resize
lowercase_ = size
lowercase_ = do_normalize
lowercase_ = image_mean
lowercase_ = image_std
def _lowercase ( self : Tuple ) -> List[Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase__( UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
a :Optional[Any] = DPTImageProcessor if is_vision_available() else None
def _lowercase ( self : Any ) -> Union[str, Any]:
lowercase_ = DPTImageProcessingTester(self )
@property
def _lowercase ( self : Any ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : int ) -> Dict:
lowercase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''image_mean''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''image_std''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''do_resize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''size''' ) )
def _lowercase ( self : Optional[Any] ) -> Optional[int]:
lowercase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 1_8} )
lowercase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2} )
def _lowercase ( self : Tuple ) -> List[Any]:
# Initialize image_processing
lowercase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image )
# Test not batched input
lowercase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
lowercase_ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def _lowercase ( self : str ) -> Optional[int]:
# Initialize image_processing
lowercase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray )
# Test not batched input
lowercase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
lowercase_ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def _lowercase ( self : List[str] ) -> str:
# Initialize image_processing
lowercase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
# Test not batched input
lowercase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
lowercase_ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 30 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__a = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['DeiTFeatureExtractor']
__a = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 30 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
__A ={
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 47 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__A ={
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''BridgeTowerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 47 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
UpperCAmelCase__ : str = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def __lowercase ( _A , _A , _A , _A , _A ) -> Optional[Any]:
for attribute in key.split(""".""" ):
SCREAMING_SNAKE_CASE : int = getattr(_A , _A )
if weight_type is not None:
SCREAMING_SNAKE_CASE : Any = getattr(_A , _A ).shape
else:
SCREAMING_SNAKE_CASE : List[str] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
SCREAMING_SNAKE_CASE : List[str] = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE : List[str] = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE : Union[str, Any] = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE : str = value
elif weight_type == "running_mean":
SCREAMING_SNAKE_CASE : Optional[Any] = value
elif weight_type == "running_var":
SCREAMING_SNAKE_CASE : int = value
elif weight_type == "num_batches_tracked":
SCREAMING_SNAKE_CASE : List[Any] = value
elif weight_type == "inv_freq":
SCREAMING_SNAKE_CASE : List[str] = value
else:
SCREAMING_SNAKE_CASE : List[str] = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __lowercase ( _A , _A , _A ) -> str:
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Optional[Any] = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE : Tuple = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE : List[str] = False
if "conv_layers" in name:
load_conv_layer(
_A , _A , _A , _A , hf_model.config.feat_extract_norm == """group""" , )
SCREAMING_SNAKE_CASE : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
SCREAMING_SNAKE_CASE : str = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
SCREAMING_SNAKE_CASE : Optional[int] = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.split(_A )[0].split(""".""" )[-2]
SCREAMING_SNAKE_CASE : List[str] = mapped_key.replace("""*""" , _A )
if "pos_bias_u" in name:
SCREAMING_SNAKE_CASE : Optional[int] = None
elif "pos_bias_v" in name:
SCREAMING_SNAKE_CASE : List[str] = None
elif "weight_g" in name:
SCREAMING_SNAKE_CASE : List[Any] = """weight_g"""
elif "weight_v" in name:
SCREAMING_SNAKE_CASE : Optional[int] = """weight_v"""
elif "bias" in name:
SCREAMING_SNAKE_CASE : Optional[Any] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
SCREAMING_SNAKE_CASE : int = """weight"""
elif "running_mean" in name:
SCREAMING_SNAKE_CASE : Tuple = """running_mean"""
elif "inv_freq" in name:
SCREAMING_SNAKE_CASE : Optional[Any] = """inv_freq"""
elif "running_var" in name:
SCREAMING_SNAKE_CASE : str = """running_var"""
elif "num_batches_tracked" in name:
SCREAMING_SNAKE_CASE : str = """num_batches_tracked"""
else:
SCREAMING_SNAKE_CASE : Optional[int] = None
set_recursively(_A , _A , _A , _A , _A )
continue
if not is_used:
unused_weights.append(_A )
logger.warning(F"Unused weights: {unused_weights}" )
def __lowercase ( _A , _A , _A , _A , _A ) -> Optional[Any]:
SCREAMING_SNAKE_CASE : Optional[int] = full_name.split("""conv_layers.""" )[-1]
SCREAMING_SNAKE_CASE : Dict = name.split(""".""" )
SCREAMING_SNAKE_CASE : Dict = int(items[0] )
SCREAMING_SNAKE_CASE : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
SCREAMING_SNAKE_CASE : Union[str, Any] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
SCREAMING_SNAKE_CASE : Union[str, Any] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
SCREAMING_SNAKE_CASE : Union[str, Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
SCREAMING_SNAKE_CASE : Tuple = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_A )
@torch.no_grad()
def __lowercase ( _A , _A , _A=None , _A=None , _A=True ) -> List[Any]:
if config_path is not None:
SCREAMING_SNAKE_CASE : Optional[int] = WavaVecaConformerConfig.from_pretrained(_A , hidden_act="""swish""" )
else:
SCREAMING_SNAKE_CASE : Any = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
SCREAMING_SNAKE_CASE : Dict = """rotary"""
if is_finetuned:
if dict_path:
SCREAMING_SNAKE_CASE : List[str] = Dictionary.load(_A )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
SCREAMING_SNAKE_CASE : str = target_dict.pad_index
SCREAMING_SNAKE_CASE : List[Any] = target_dict.bos_index
SCREAMING_SNAKE_CASE : Optional[int] = target_dict.eos_index
SCREAMING_SNAKE_CASE : Optional[int] = len(target_dict.symbols )
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(_A , """vocab.json""" )
if not os.path.isdir(_A ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_A ) )
return
os.makedirs(_A , exist_ok=_A )
SCREAMING_SNAKE_CASE : Optional[int] = target_dict.indices
# fairseq has the <pad> and <s> switched
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : Optional[Any] = 1
with open(_A , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(_A , _A )
SCREAMING_SNAKE_CASE : Optional[Any] = WavaVecaCTCTokenizer(
_A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_A , )
SCREAMING_SNAKE_CASE : Optional[Any] = True if config.feat_extract_norm == """layer""" else False
SCREAMING_SNAKE_CASE : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_A , return_attention_mask=_A , )
SCREAMING_SNAKE_CASE : Optional[Any] = WavaVecaProcessor(feature_extractor=_A , tokenizer=_A )
processor.save_pretrained(_A )
SCREAMING_SNAKE_CASE : Any = WavaVecaConformerForCTC(_A )
else:
SCREAMING_SNAKE_CASE : Dict = WavaVecaConformerForPreTraining(_A )
if is_finetuned:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
SCREAMING_SNAKE_CASE : List[Any] = argparse.Namespace(task="""audio_pretraining""" )
SCREAMING_SNAKE_CASE : Any = fairseq.tasks.setup_task(_A )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_A )
SCREAMING_SNAKE_CASE : Optional[int] = model[0].eval()
recursively_load_weights(_A , _A , not is_finetuned )
hf_wavavec.save_pretrained(_A )
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
UpperCAmelCase__ : Optional[int] = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 245 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
UpperCAmelCase__ : Tuple = logging.getLogger(__name__)
class a__ ( UpperCAmelCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] ="""token-classification"""
def __init__( self : Tuple , UpperCAmelCase__ : Tuple ) ->Optional[Any]:
"""simple docstring"""
if type(UpperCAmelCase__ ) == dict:
SCREAMING_SNAKE_CASE : List[str] = Namespace(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : List[str] = import_module("""tasks""" )
try:
SCREAMING_SNAKE_CASE : str = getattr(UpperCAmelCase__ , hparams.task_type )
SCREAMING_SNAKE_CASE : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" )
SCREAMING_SNAKE_CASE : List[Any] = self.token_classification_task.get_labels(hparams.labels )
SCREAMING_SNAKE_CASE : List[Any] = CrossEntropyLoss().ignore_index
super().__init__(UpperCAmelCase__ , len(self.labels ) , self.mode )
def _lowercase ( self : List[str] , **UpperCAmelCase__ : Optional[int] ) ->Any:
"""simple docstring"""
return self.model(**UpperCAmelCase__ )
def _lowercase ( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) ->List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
SCREAMING_SNAKE_CASE : List[str] = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
) # XLM and RoBERTa don"t use token_type_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = self(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : int = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _lowercase ( self : Optional[Any] ) ->List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.hparams
for mode in ["train", "dev", "test"]:
SCREAMING_SNAKE_CASE : Any = self._feature_file(UpperCAmelCase__ )
if os.path.exists(UpperCAmelCase__ ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Tuple = torch.load(UpperCAmelCase__ )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
SCREAMING_SNAKE_CASE : Optional[int] = self.token_classification_task.read_examples_from_file(args.data_dir , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : List[str] = self.token_classification_task.convert_examples_to_features(
UpperCAmelCase__ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=UpperCAmelCase__ , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("""Saving features into cached file %s""" , UpperCAmelCase__ )
torch.save(UpperCAmelCase__ , UpperCAmelCase__ )
def _lowercase ( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : bool = False ) ->DataLoader:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = self._feature_file(UpperCAmelCase__ )
logger.info("""Loading features from cached file %s""" , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = torch.load(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
SCREAMING_SNAKE_CASE : int = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
SCREAMING_SNAKE_CASE : Any = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
SCREAMING_SNAKE_CASE : Dict = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) , batch_size=UpperCAmelCase__ )
def _lowercase ( self : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] ) ->Tuple:
"""simple docstring"""
"""Compute validation""" ""
SCREAMING_SNAKE_CASE : Tuple = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
SCREAMING_SNAKE_CASE : str = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
) # XLM and RoBERTa don"t use token_type_ids
SCREAMING_SNAKE_CASE : Dict = self(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = outputs[:2]
SCREAMING_SNAKE_CASE : Optional[Any] = logits.detach().cpu().numpy()
SCREAMING_SNAKE_CASE : Tuple = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowercase ( self : int , UpperCAmelCase__ : List[str] ) ->Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = torch.stack([x["""val_loss"""] for x in outputs] ).mean()
SCREAMING_SNAKE_CASE : str = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
SCREAMING_SNAKE_CASE : Tuple = np.argmax(UpperCAmelCase__ , axis=2 )
SCREAMING_SNAKE_CASE : Optional[Any] = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = dict(enumerate(self.labels ) )
SCREAMING_SNAKE_CASE : int = [[] for _ in range(out_label_ids.shape[0] )]
SCREAMING_SNAKE_CASE : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
SCREAMING_SNAKE_CASE : Tuple = {
"""val_loss""": val_loss_mean,
"""accuracy_score""": accuracy_score(UpperCAmelCase__ , UpperCAmelCase__ ),
"""precision""": precision_score(UpperCAmelCase__ , UpperCAmelCase__ ),
"""recall""": recall_score(UpperCAmelCase__ , UpperCAmelCase__ ),
"""f1""": fa_score(UpperCAmelCase__ , UpperCAmelCase__ ),
}
SCREAMING_SNAKE_CASE : Optional[int] = dict(results.items() )
SCREAMING_SNAKE_CASE : Optional[Any] = results
return ret, preds_list, out_label_list
def _lowercase ( self : Dict , UpperCAmelCase__ : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self._eval_end(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowercase ( self : List[str] , UpperCAmelCase__ : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self._eval_end(UpperCAmelCase__ )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
SCREAMING_SNAKE_CASE : Dict = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowercase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int ) ->List[Any]:
"""simple docstring"""
BaseTransformer.add_model_specific_args(UpperCAmelCase__ , UpperCAmelCase__ )
parser.add_argument(
"""--task_type""" , default="""NER""" , type=UpperCAmelCase__ , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" )
parser.add_argument(
"""--max_seq_length""" , default=1_2_8 , type=UpperCAmelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--labels""" , default="""""" , type=UpperCAmelCase__ , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=UpperCAmelCase__ , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
return parser
if __name__ == "__main__":
UpperCAmelCase__ : str = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
UpperCAmelCase__ : Tuple = NERTransformer.add_model_specific_args(parser, os.getcwd())
UpperCAmelCase__ : int = parser.parse_args()
UpperCAmelCase__ : Union[str, Any] = NERTransformer(args)
UpperCAmelCase__ : str = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
UpperCAmelCase__ : Union[str, Any] = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
UpperCAmelCase__ : Any = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 245 | 1 |
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
_lowercase : Union[str, Any] = '1'
_lowercase : str = '0'
_lowercase : List[Any] = '1'
_lowercase : Dict = ort.SessionOptions()
_lowercase : int = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('Create inference session...')
_lowercase : Optional[Any] = ['TensorrtExecutionProvider', 'CUDAExecutionProvider']
_lowercase : Optional[int] = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider)
_lowercase : str = ort.RunOptions()
_lowercase : Optional[Any] = 1_28
_lowercase : str = 1
_lowercase : Optional[int] = np.ones((batch, sequence), dtype=np.intaa)
_lowercase : Dict = np.ones((batch, sequence), dtype=np.intaa)
_lowercase : Dict = np.ones((batch, sequence), dtype=np.intaa)
print('Warm up phase...')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Start inference...')
_lowercase : Any = time.time()
_lowercase : List[str] = 20_00
_lowercase : List[Any] = {}
for iter in range(max_iters):
_lowercase : Any = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Average Inference Time = {:.3f} ms'.format((time.time() - start_time) * 10_00 / max_iters))
| 86 |
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
_lowercase : List[Any] = TypeVar('T')
class _UpperCAmelCase ( Generic[T] ):
a__ : deque[T] # Cache store of keys
a__ : set[T] # References of the keys in cache
a__ : int = 10 # Maximum capacity of cache
def __init__( self : Optional[Any] , _lowercase : int ):
__UpperCAmelCase = deque()
__UpperCAmelCase = set()
if not n:
__UpperCAmelCase = sys.maxsize
elif n < 0:
raise ValueError('''n should be an integer greater than 0.''' )
else:
__UpperCAmelCase = n
def a ( self : Optional[Any] , _lowercase : T ):
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
__UpperCAmelCase = self.dq_store.pop()
self.key_reference.remove(_lowercase )
else:
self.dq_store.remove(_lowercase )
self.dq_store.appendleft(_lowercase )
self.key_reference.add(_lowercase )
def a ( self : str ):
for k in self.dq_store:
print(_lowercase )
def __repr__( self : Dict ):
return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : LRUCache[str | int] = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 86 | 1 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def A (__A : Optional[int] , __A : Any , __A : str=1024 , __A : Tuple=1024 , __A : int=False , **__A : Any ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = AutoTokenizer.from_pretrained(__A )
UpperCAmelCase_ = SeqaSeqDataset(__A , __A , __A , __A , type_path='''train''' , **__A )
UpperCAmelCase_ = tok.pad_token_id
def get_lens(__A : Optional[int] ):
UpperCAmelCase_ = tqdm(
DataLoader(__A , batch_size=512 , num_workers=8 , shuffle=__A , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
UpperCAmelCase_ = []
for batch in dl:
UpperCAmelCase_ = batch['''input_ids'''].ne(__A ).sum(1 ).tolist()
UpperCAmelCase_ = batch['''labels'''].ne(__A ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(__A , __A ):
max_lens.append(max(__A , __A ) )
else:
max_lens.extend(__A )
return max_lens
UpperCAmelCase_ = get_lens(__A )
UpperCAmelCase_ = SeqaSeqDataset(__A , __A , __A , __A , type_path='''val''' , **__A )
UpperCAmelCase_ = get_lens(__A )
pickle_save(__A , train_ds.len_file )
pickle_save(__A , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 51 |
import math
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> bool:
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
__lowercase = range(3 , int(math.sqrt(SCREAMING_SNAKE_CASE ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple=1 , **SCREAMING_SNAKE_CASE : Tuple ) -> Dict:
__lowercase = factor * value
__lowercase = value
while not is_prime(SCREAMING_SNAKE_CASE ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **SCREAMING_SNAKE_CASE )
return value
| 325 | 0 |
'''simple docstring'''
def a__ ( lowercase : list, lowercase : int = 0 ) -> list:
"""simple docstring"""
_UpperCamelCase = length or len(lowercase )
_UpperCamelCase = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
_UpperCamelCase , _UpperCamelCase = list_data[i + 1], list_data[i]
_UpperCamelCase = True
return list_data if not swapped else bubble_sort(lowercase, length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 287 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : Tuple ) -> int:
'''simple docstring'''
_UpperCamelCase = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) )
def snake_case__ ( self : int ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) )
def snake_case__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ ) )
def snake_case__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) )
def snake_case__ ( self : Dict ) -> Dict:
'''simple docstring'''
_UpperCamelCase = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ ) )
def snake_case__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_UpperCamelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def snake_case__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
_UpperCamelCase = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_UpperCamelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
_UpperCamelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def snake_case__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_UpperCamelCase = '''fp16'''
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def snake_case__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
_UpperCamelCase = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
_UpperCamelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def snake_case__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
_UpperCamelCase = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
_UpperCamelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def snake_case__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_UpperCamelCase = '''fp16'''
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
| 287 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _a ( __a ):
__a : Optional[Any] = (DEISMultistepScheduler,)
__a : Any = (("""num_inference_steps""", 25),)
def A ( self : Any , **lowercase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
}
config.update(**lowercase )
return config
def A ( self : Union[str, Any] , lowercase : Optional[Any]=0 , **lowercase : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = dict(self.forward_default_kwargs )
UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase )
UpperCAmelCase = self.dummy_sample
UpperCAmelCase = 0.1 * sample
UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase = self.get_scheduler_config(**lowercase )
UpperCAmelCase = scheduler_class(**lowercase )
scheduler.set_timesteps(lowercase )
# copy over dummy past residuals
UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase )
UpperCAmelCase = scheduler_class.from_pretrained(lowercase )
new_scheduler.set_timesteps(lowercase )
# copy over dummy past residuals
UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase , UpperCAmelCase = sample, sample
for t in range(lowercase , time_step + scheduler.config.solver_order + 1 ):
UpperCAmelCase = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
UpperCAmelCase = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def A ( self : int ):
'''simple docstring'''
pass
def A ( self : str , lowercase : Any=0 , **lowercase : Tuple ):
'''simple docstring'''
UpperCAmelCase = dict(self.forward_default_kwargs )
UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase )
UpperCAmelCase = self.dummy_sample
UpperCAmelCase = 0.1 * sample
UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowercase )
scheduler.set_timesteps(lowercase )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase )
UpperCAmelCase = scheduler_class.from_pretrained(lowercase )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
UpperCAmelCase = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def A ( self : Any , lowercase : List[str]=None , **lowercase : List[Any] ):
'''simple docstring'''
if scheduler is None:
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(**lowercase )
UpperCAmelCase = scheduler_class(**lowercase )
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(**lowercase )
UpperCAmelCase = scheduler_class(**lowercase )
UpperCAmelCase = 10
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(lowercase )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase = model(lowercase , lowercase )
UpperCAmelCase = scheduler.step(lowercase , lowercase , lowercase ).prev_sample
return sample
def A ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase = dict(self.forward_default_kwargs )
UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowercase )
UpperCAmelCase = self.dummy_sample
UpperCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase , '''set_timesteps''' ):
scheduler.set_timesteps(lowercase )
elif num_inference_steps is not None and not hasattr(lowercase , '''set_timesteps''' ):
UpperCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
UpperCAmelCase = scheduler.timesteps[5]
UpperCAmelCase = scheduler.timesteps[6]
UpperCAmelCase = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
UpperCAmelCase = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def A ( self : str ):
'''simple docstring'''
UpperCAmelCase = DEISMultistepScheduler(**self.get_scheduler_config() )
UpperCAmelCase = self.full_loop(scheduler=lowercase )
UpperCAmelCase = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1E-3
UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
UpperCAmelCase = self.full_loop(scheduler=lowercase )
UpperCAmelCase = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1E-3
def A ( self : Dict ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=lowercase )
def A ( self : int ):
'''simple docstring'''
self.check_over_configs(thresholding=lowercase )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowercase , prediction_type=lowercase , sample_max_value=lowercase , algorithm_type='''deis''' , solver_order=lowercase , solver_type=lowercase , )
def A ( self : Optional[int] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase )
def A ( self : Tuple ):
'''simple docstring'''
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowercase , solver_type=lowercase , prediction_type=lowercase , algorithm_type=lowercase , )
UpperCAmelCase = self.full_loop(
solver_order=lowercase , solver_type=lowercase , prediction_type=lowercase , algorithm_type=lowercase , )
assert not torch.isnan(lowercase ).any(), "Samples have nan numbers"
def A ( self : int ):
'''simple docstring'''
self.check_over_configs(lower_order_final=lowercase )
self.check_over_configs(lower_order_final=lowercase )
def A ( self : List[Any] ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=lowercase , time_step=0 )
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = self.full_loop()
UpperCAmelCase = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1E-3
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' )
UpperCAmelCase = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.091 ) < 1E-3
def A ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(thresholding=lowercase , dynamic_thresholding_ratio=0 )
UpperCAmelCase = scheduler_class(**lowercase )
UpperCAmelCase = 10
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowercase )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase = model(lowercase , lowercase )
UpperCAmelCase = scheduler.step(lowercase , lowercase , lowercase ).prev_sample
assert sample.dtype == torch.floataa
| 34 |
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple[float, float, float]
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple[float, float, float]
def _snake_case ( UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad ):
A__ = end_pointa[0] - end_pointa[0]
A__ = end_pointa[1] - end_pointa[1]
A__ = end_pointa[2] - end_pointa[2]
return (x, y, z)
def _snake_case ( UpperCAmelCase_ : Vectorad , UpperCAmelCase_ : Vectorad ):
A__ = ab[1] * ac[2] - ab[2] * ac[1] # *i
A__ = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
A__ = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def _snake_case ( UpperCAmelCase_ : Vectorad , UpperCAmelCase_ : int ):
return tuple(round(UpperCAmelCase_ , UpperCAmelCase_ ) for x in vector ) == (0, 0, 0)
def _snake_case ( UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad , UpperCAmelCase_ : int = 10 ):
A__ = create_vector(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = create_vector(UpperCAmelCase_ , UpperCAmelCase_ )
return is_zero_vector(get_ad_vectors_cross(UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ )
| 335 | 0 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
_lowercase : Any = logging.getLogger(__name__)
@dataclass
class __magic_name__ :
UpperCamelCase__ = field(
default='''tab_fact''', metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''})
UpperCamelCase__ = field(
default='''tab_fact''', metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''}, )
UpperCamelCase__ = field(
default=1024, metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
}, )
UpperCamelCase__ = field(
default=_UpperCAmelCase, metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''})
UpperCamelCase__ = field(
default=_UpperCAmelCase, metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
}, )
UpperCamelCase__ = field(
default=_UpperCAmelCase, metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
}, )
UpperCamelCase__ = field(
default=_UpperCAmelCase, metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
}, )
UpperCamelCase__ = field(
default=_UpperCAmelCase, metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
'''value if set.'''
)
}, )
UpperCamelCase__ = field(
default=_UpperCAmelCase, metadata={'''help''': '''A csv or a json file containing the training data.'''})
UpperCamelCase__ = field(
default=_UpperCAmelCase, metadata={'''help''': '''A csv or a json file containing the validation data.'''})
UpperCamelCase__ = field(default=_UpperCAmelCase, metadata={'''help''': '''A csv or a json file containing the test data.'''})
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" )
else:
lowercase_ : List[str] = self.train_file.split(""".""" )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
lowercase_ : Optional[int] = self.validation_file.split(""".""" )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __magic_name__ :
UpperCamelCase__ = field(
default=_UpperCAmelCase, metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''})
UpperCamelCase__ = field(
default=_UpperCAmelCase, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''})
UpperCamelCase__ = field(
default=_UpperCAmelCase, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''})
UpperCamelCase__ = field(
default=_UpperCAmelCase, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
UpperCamelCase__ = field(
default=_UpperCAmelCase, metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''}, )
UpperCamelCase__ = field(
default='''main''', metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''}, )
UpperCamelCase__ = field(
default=_UpperCAmelCase, metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
}, )
def lowerCamelCase ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase_ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase_ , lowercase_ , lowercase_ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase_ , lowercase_ , lowercase_ : Any = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
lowercase_ : Tuple = training_args.get_process_log_level()
logger.setLevel(UpperCAmelCase__ )
datasets.utils.logging.set_verbosity(UpperCAmelCase__ )
transformers.utils.logging.set_verbosity(UpperCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
lowercase_ : str = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase_ : Tuple = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowercase_ : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowercase_ : List[str] = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowercase_ : int = data_args.train_file.split(""".""" )[-1]
lowercase_ : Any = data_args.test_file.split(""".""" )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowercase_ : List[str] = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(F'''load a local file for {key}: {data_files[key]}''' )
if data_args.train_file.endswith(""".csv""" ):
# Loading a dataset from local csv files
lowercase_ : List[str] = load_dataset("""csv""" , data_files=UpperCAmelCase__ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowercase_ : Optional[Any] = load_dataset("""json""" , data_files=UpperCAmelCase__ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowercase_ : List[Any] = raw_datasets["""train"""].features["""label"""].names
lowercase_ : Dict = len(UpperCAmelCase__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase_ : Dict = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
lowercase_ : str = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=UpperCAmelCase__ , )
lowercase_ : List[str] = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
lowercase_ : Any = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowercase_ : int = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
lowercase_ : Dict = {"""Refused""": 0, """Entailed""": 1}
lowercase_ : Optional[int] = {0: """Refused""", 1: """Entailed"""}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
lowercase_ : List[Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(UpperCAmelCase__ : List[str] ):
# Tokenize the texts
def _convert_table_text_to_pandas(UpperCAmelCase__ : Dict ):
lowercase_ : Optional[int] = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )]
lowercase_ : str = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
lowercase_ : List[Any] = examples["""statement"""]
lowercase_ : Dict = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) )
lowercase_ : Optional[int] = tokenizer(UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , truncation=UpperCAmelCase__ )
lowercase_ : Union[str, Any] = examples["""label"""]
return result
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
lowercase_ : Optional[Any] = raw_datasets.map(
UpperCAmelCase__ , batched=UpperCAmelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
lowercase_ : List[Any] = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
lowercase_ : Union[str, Any] = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
lowercase_ : Union[str, Any] = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
lowercase_ : str = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
lowercase_ : Optional[Any] = raw_datasets["""test"""]
if data_args.max_predict_samples is not None:
lowercase_ : Any = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(UpperCAmelCase__ ) ) , 3 ):
logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(UpperCAmelCase__ : EvalPrediction ):
lowercase_ : Optional[Any] = p.predictions[0] if isinstance(p.predictions , UpperCAmelCase__ ) else p.predictions
lowercase_ : Optional[int] = np.argmax(UpperCAmelCase__ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowercase_ : Tuple = default_data_collator
elif training_args.fpaa:
lowercase_ : int = DataCollatorWithPadding(UpperCAmelCase__ , pad_to_multiple_of=8 )
else:
lowercase_ : Union[str, Any] = None
# Initialize our Trainer
lowercase_ : Any = Trainer(
model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , )
# Training
if training_args.do_train:
lowercase_ : int = None
if training_args.resume_from_checkpoint is not None:
lowercase_ : str = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase_ : Tuple = last_checkpoint
lowercase_ : Tuple = trainer.train(resume_from_checkpoint=UpperCAmelCase__ )
lowercase_ : Optional[Any] = train_result.metrics
lowercase_ : int = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCAmelCase__ )
)
lowercase_ : int = min(UpperCAmelCase__ , len(UpperCAmelCase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , UpperCAmelCase__ )
trainer.save_metrics("""train""" , UpperCAmelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowercase_ : Union[str, Any] = trainer.evaluate(eval_dataset=UpperCAmelCase__ )
lowercase_ : Optional[int] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCAmelCase__ )
lowercase_ : Union[str, Any] = min(UpperCAmelCase__ , len(UpperCAmelCase__ ) )
trainer.log_metrics("""eval""" , UpperCAmelCase__ )
trainer.save_metrics("""eval""" , UpperCAmelCase__ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
lowercase_ : Optional[Any] = predict_dataset.remove_columns("""label""" )
lowercase_ : Any = trainer.predict(UpperCAmelCase__ , metric_key_prefix="""predict""" ).predictions
lowercase_ : str = np.argmax(UpperCAmelCase__ , axis=1 )
lowercase_ : Union[str, Any] = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" )
if trainer.is_world_process_zero():
with open(UpperCAmelCase__ , """w""" ) as writer:
logger.info("""***** Predict Results *****""" )
writer.write("""index\tprediction\n""" )
for index, item in enumerate(UpperCAmelCase__ ):
lowercase_ : List[Any] = label_list[item]
writer.write(F'''{index}\t{item}\n''' )
lowercase_ : Optional[int] = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCAmelCase__ )
else:
trainer.create_model_card(**UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : int ) -> List[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 21 | '''simple docstring'''
import os
import numpy
import onnx
def lowerCamelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str ) -> Tuple:
lowercase_ : Tuple = a.name
lowercase_ : Tuple = b.name
lowercase_ : Any = """"""
lowercase_ : List[Any] = """"""
lowercase_ : List[Any] = a == b
lowercase_ : Union[str, Any] = name_a
lowercase_ : Optional[Any] = name_b
return res
def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(UpperCAmelCase__ , UpperCAmelCase__ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase__ , UpperCAmelCase__ )
_graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase__ , UpperCAmelCase__ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) -> int:
for n in graph_proto.node:
_node_replace_input_with(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict ) -> List[str]:
lowercase_ : int = list(model.graph.initializer )
lowercase_ : List[str] = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
lowercase_ : Optional[Any] = inits[i].name
lowercase_ : List[str] = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : int ) -> List[str]:
lowercase_ : Dict = os.path.dirname(UpperCAmelCase__ )
lowercase_ : Optional[Any] = os.path.basename(UpperCAmelCase__ )
lowercase_ : str = onnx.load(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) )
lowercase_ : List[Any] = list(model.graph.initializer )
lowercase_ : int = set()
lowercase_ : int = {}
lowercase_ : str = []
lowercase_ : int = 0
for i in range(len(UpperCAmelCase__ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(UpperCAmelCase__ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(UpperCAmelCase__ )
dup_set.add(UpperCAmelCase__ )
lowercase_ : Dict = inits[j].data_type
lowercase_ : List[str] = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("""unexpected data type: """ , UpperCAmelCase__ )
total_reduced_size += mem_size
lowercase_ : int = inits[i].name
lowercase_ : List[str] = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(UpperCAmelCase__ )
else:
lowercase_ : Optional[int] = [name_j]
ind_to_replace.append((j, i) )
print("""total reduced size: """ , total_reduced_size / 1024 / 1024 / 1024 , """GB""" )
lowercase_ : Tuple = sorted(UpperCAmelCase__ )
_remove_dup_initializers_from_model(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : Union[str, Any] = """optimized_""" + model_file_name
lowercase_ : Optional[int] = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
onnx.save(UpperCAmelCase__ , UpperCAmelCase__ )
return new_model
| 21 | 1 |
UpperCamelCase__ = frozenset(
[
"""prompt""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
UpperCamelCase__ = frozenset(["""prompt""", """negative_prompt"""])
UpperCamelCase__ = frozenset([])
UpperCamelCase__ = frozenset(["""image"""])
UpperCamelCase__ = frozenset(
[
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
UpperCamelCase__ = frozenset(["""image"""])
UpperCamelCase__ = frozenset(
[
"""prompt""",
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
UpperCamelCase__ = frozenset(["""prompt""", """image""", """negative_prompt"""])
UpperCamelCase__ = frozenset(
[
# Text guided image variation with an image mask
"""prompt""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
UpperCamelCase__ = frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
UpperCamelCase__ = frozenset(
[
# image variation with an image mask
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
UpperCamelCase__ = frozenset(["""image""", """mask_image"""])
UpperCamelCase__ = frozenset(
[
"""example_image""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
UpperCamelCase__ = frozenset(["""example_image""", """image""", """mask_image"""])
UpperCamelCase__ = frozenset(["""class_labels"""])
UpperCamelCase__ = frozenset(["""class_labels"""])
UpperCamelCase__ = frozenset(["""batch_size"""])
UpperCamelCase__ = frozenset([])
UpperCamelCase__ = frozenset(["""batch_size"""])
UpperCamelCase__ = frozenset([])
UpperCamelCase__ = frozenset(
[
"""prompt""",
"""audio_length_in_s""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
UpperCamelCase__ = frozenset(["""prompt""", """negative_prompt"""])
UpperCamelCase__ = frozenset(["""input_tokens"""])
UpperCamelCase__ = frozenset(["""input_tokens"""])
| 92 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCamelCase__ = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 92 | 1 |
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : list ) -> list:
if len(_snake_case ) <= 1:
return lst
UpperCAmelCase_ = 1
while i < len(_snake_case ):
if lst[i - 1] <= lst[i]:
i += 1
else:
UpperCAmelCase_ = lst[i], lst[i - 1]
i -= 1
if i == 0:
UpperCAmelCase_ = 1
return lst
if __name__ == "__main__":
_lowerCamelCase = input('Enter numbers separated by a comma:\n').strip()
_lowerCamelCase = [int(item) for item in user_input.split(',')]
print(gnome_sort(unsorted))
| 352 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class a ( _A , _A , _A , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : List[Any] = StableDiffusionControlNetImgaImgPipeline
lowerCAmelCase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowerCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} )
lowerCAmelCase : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase_ ( self : Dict ):
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
UpperCAmelCase_ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
UpperCAmelCase_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__snake_case , set_alpha_to_one=__snake_case , )
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
UpperCAmelCase_ = CLIPTextModel(__snake_case )
UpperCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase_ = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCamelCase_ ( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Any=0 ):
if str(__snake_case ).startswith('''mps''' ):
UpperCAmelCase_ = torch.manual_seed(__snake_case )
else:
UpperCAmelCase_ = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
UpperCAmelCase_ = 2
UpperCAmelCase_ = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__snake_case , device=torch.device(__snake_case ) , )
UpperCAmelCase_ = floats_tensor(control_image.shape , rng=random.Random(__snake_case ) ).to(__snake_case )
UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(__snake_case ) ).convert('''RGB''' ).resize((64, 64) )
UpperCAmelCase_ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def lowerCamelCase_ ( self : Union[str, Any] ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowerCamelCase_ ( self : Optional[Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def lowerCamelCase_ ( self : Dict ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class a ( _A , _A , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Dict = StableDiffusionControlNetImgaImgPipeline
lowerCAmelCase : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowerCAmelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase : Optional[int] = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def lowerCamelCase_ ( self : Optional[Any] ):
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(__snake_case : Tuple ):
if isinstance(__snake_case , torch.nn.Convad ):
torch.nn.init.normal(m.weight )
m.bias.data.fill_(1.0 )
UpperCAmelCase_ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__snake_case )
torch.manual_seed(0 )
UpperCAmelCase_ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__snake_case )
torch.manual_seed(0 )
UpperCAmelCase_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__snake_case , set_alpha_to_one=__snake_case , )
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
UpperCAmelCase_ = CLIPTextModel(__snake_case )
UpperCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase_ = MultiControlNetModel([controlneta, controlneta] )
UpperCAmelCase_ = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCamelCase_ ( self : Optional[Any] , __snake_case : Any , __snake_case : Optional[Any]=0 ):
if str(__snake_case ).startswith('''mps''' ):
UpperCAmelCase_ = torch.manual_seed(__snake_case )
else:
UpperCAmelCase_ = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
UpperCAmelCase_ = 2
UpperCAmelCase_ = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__snake_case , device=torch.device(__snake_case ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__snake_case , device=torch.device(__snake_case ) , ),
]
UpperCAmelCase_ = floats_tensor(control_image[0].shape , rng=random.Random(__snake_case ) ).to(__snake_case )
UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(__snake_case ) ).convert('''RGB''' ).resize((64, 64) )
UpperCAmelCase_ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def lowerCamelCase_ ( self : List[Any] ):
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = self.pipeline_class(**__snake_case )
pipe.to(__snake_case )
UpperCAmelCase_ = 10.0
UpperCAmelCase_ = 4
UpperCAmelCase_ = self.get_dummy_inputs(__snake_case )
UpperCAmelCase_ = steps
UpperCAmelCase_ = scale
UpperCAmelCase_ = pipe(**__snake_case )[0]
UpperCAmelCase_ = self.get_dummy_inputs(__snake_case )
UpperCAmelCase_ = steps
UpperCAmelCase_ = scale
UpperCAmelCase_ = pipe(**__snake_case , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
UpperCAmelCase_ = self.get_dummy_inputs(__snake_case )
UpperCAmelCase_ = steps
UpperCAmelCase_ = scale
UpperCAmelCase_ = pipe(**__snake_case , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
UpperCAmelCase_ = self.get_dummy_inputs(__snake_case )
UpperCAmelCase_ = steps
UpperCAmelCase_ = scale
UpperCAmelCase_ = pipe(**__snake_case , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def lowerCamelCase_ ( self : Optional[int] ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowerCamelCase_ ( self : List[Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def lowerCamelCase_ ( self : List[Any] ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def lowerCamelCase_ ( self : Optional[Any] ):
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = self.pipeline_class(**__snake_case )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(__snake_case )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self : Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Union[str, Any] ):
UpperCAmelCase_ = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
UpperCAmelCase_ = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , safety_checker=__snake_case , controlnet=__snake_case )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCAmelCase_ = '''evil space-punk bird'''
UpperCAmelCase_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((5_12, 5_12) )
UpperCAmelCase_ = load_image(
'''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((5_12, 5_12) )
UpperCAmelCase_ = pipe(
__snake_case , __snake_case , control_image=__snake_case , generator=__snake_case , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
UpperCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
assert np.abs(expected_image - image ).max() < 9E-2
| 177 | 0 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_lowerCamelCase : Dict = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
_lowerCamelCase : Union[str, Any] = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
_lowerCamelCase : Optional[Any] = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self : Any) ->Tuple:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Optional[Any]="auto" , UpperCAmelCase__ : Tuple=-1 , UpperCAmelCase__ : Any=0.9 , UpperCAmelCase__ : Optional[Any]=5 , UpperCAmelCase__ : Any=500 , UpperCAmelCase__ : str="gpt2-large" , UpperCAmelCase__ : Dict=-1 , UpperCAmelCase__ : int=1_024 , UpperCAmelCase__ : Any=25 , UpperCAmelCase__ : Optional[int]=5 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[Any]=25 , ) ->Any:
'''simple docstring'''
A__ = compute_mauve(
p_text=UpperCAmelCase__ , q_text=UpperCAmelCase__ , p_features=UpperCAmelCase__ , q_features=UpperCAmelCase__ , p_tokens=UpperCAmelCase__ , q_tokens=UpperCAmelCase__ , num_buckets=UpperCAmelCase__ , pca_max_data=UpperCAmelCase__ , kmeans_explained_var=UpperCAmelCase__ , kmeans_num_redo=UpperCAmelCase__ , kmeans_max_iter=UpperCAmelCase__ , featurize_model_name=UpperCAmelCase__ , device_id=UpperCAmelCase__ , max_text_length=UpperCAmelCase__ , divergence_curve_discretization_size=UpperCAmelCase__ , mauve_scaling_factor=UpperCAmelCase__ , verbose=UpperCAmelCase__ , seed=UpperCAmelCase__ , )
return out
| 14 |
"""simple docstring"""
import random
from typing import Any
def _A (__a ) -> list[Any]:
"""simple docstring"""
for _ in range(len(__a ) ):
SCREAMING_SNAKE_CASE_ : Optional[int] = random.randint(0 , len(__a ) - 1 )
SCREAMING_SNAKE_CASE_ : Tuple = random.randint(0 , len(__a ) - 1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = data[b], data[a]
return data
if __name__ == "__main__":
UpperCAmelCase_ : Dict = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCAmelCase_ : Dict = ["""python""", """says""", """hello""", """!"""]
print("""Fisher-Yates Shuffle:""")
print("""List""", integers, strings)
print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 91 | 0 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
A_ : int = namedtuple("covid_data", "cases deaths recovered")
def lowerCamelCase_ ( _lowerCamelCase = "https://www.worldometers.info/coronavirus/" ):
lowerCamelCase__ : Any = '//div[@class = "maincounter-number"]/span/text()'
return covid_data(*html.fromstring(requests.get(_lowerCamelCase ).content ).xpath(_lowerCamelCase ) )
A_ : Dict = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
| 360 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = []
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : Tuple = {
'^': 3,
'*': 2,
'/': 2,
'%': 2,
'+': 1,
'-': 1,
} # Priority of each operator
lowerCamelCase__ : List[str] = len(_lowerCamelCase ) if (len(_lowerCamelCase ) > 7) else 7
# Print table header for output
print(
'Symbol'.center(8 ) , 'Stack'.center(_lowerCamelCase ) , 'Postfix'.center(_lowerCamelCase ) , sep=' | ' , )
print('-' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(_lowerCamelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(_lowerCamelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(_lowerCamelCase ) == 0:
stack.append(_lowerCamelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(_lowerCamelCase ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(_lowerCamelCase ) # push x to stack
print(
x.center(8 ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=' | ' , ) # Output in tabular format
while len(_lowerCamelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
' '.center(8 ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=' | ' , ) # Output in tabular format
return "".join(_lowerCamelCase ) # return Postfix as str
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = list(infix[::-1] ) # reverse the infix equation
for i in range(len(_lowerCamelCase ) ):
if infix[i] == "(":
lowerCamelCase__ : List[Any] = ')' # change "(" to ")"
elif infix[i] == ")":
lowerCamelCase__ : Tuple = '(' # change ")" to "("
return (infix_2_postfix(''.join(_lowerCamelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
A_ : Tuple = input("\nEnter an Infix Equation = ") # Input an Infix equation
A_ : List[str] = "".join(Infix.split()) # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 316 | 0 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __snake_case ( __SCREAMING_SNAKE_CASE ):
a__ = ["""vqvae"""]
def __init__( self , lowercase , lowercase , lowercase , lowercase , ) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , mel=__UpperCAmelCase , vqvae=__UpperCAmelCase)
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
return 50 if isinstance(self.scheduler , __UpperCAmelCase) else 10_00
@torch.no_grad()
def __call__( self , lowercase = 1 , lowercase = None , lowercase = None , lowercase = 0 , lowercase = 0 , lowercase = None , lowercase = None , lowercase = 0 , lowercase = 0 , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase=True , ) -> int:
'''simple docstring'''
a__: Dict = steps or self.get_default_steps()
self.scheduler.set_timesteps(__UpperCAmelCase)
a__: str = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size) == int:
a__: List[str] = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
a__: str = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__UpperCAmelCase , device=self.device , )
a__: List[str] = noise
a__: Optional[Any] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__UpperCAmelCase , __UpperCAmelCase)
a__: str = self.mel.audio_slice_to_image(__UpperCAmelCase)
a__: Optional[int] = np.frombuffer(input_image.tobytes() , dtype='uint8').reshape(
(input_image.height, input_image.width))
a__: Any = (input_image / 2_55) * 2 - 1
a__: int = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float).to(self.device)
if self.vqvae is not None:
a__: Tuple = self.vqvae.encode(torch.unsqueeze(__UpperCAmelCase , 0)).latent_dist.sample(
generator=__UpperCAmelCase)[0]
a__: Union[str, Any] = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
a__: List[str] = self.scheduler.add_noise(__UpperCAmelCase , __UpperCAmelCase , self.scheduler.timesteps[start_step - 1])
a__: Optional[int] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
a__: Any = int(mask_start_secs * pixels_per_second)
a__: Optional[Any] = int(mask_end_secs * pixels_per_second)
a__: int = self.scheduler.add_noise(__UpperCAmelCase , __UpperCAmelCase , torch.tensor(self.scheduler.timesteps[start_step:]))
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
if isinstance(self.unet , __UpperCAmelCase):
a__: Optional[Any] = self.unet(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)['sample']
else:
a__: int = self.unet(__UpperCAmelCase , __UpperCAmelCase)['sample']
if isinstance(self.scheduler , __UpperCAmelCase):
a__: int = self.scheduler.step(
model_output=__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , )['prev_sample']
else:
a__: List[Any] = self.scheduler.step(
model_output=__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , generator=__UpperCAmelCase , )['prev_sample']
if mask is not None:
if mask_start > 0:
a__: str = mask[:, step, :, :mask_start]
if mask_end > 0:
a__: List[str] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
a__: List[Any] = 1 / self.vqvae.config.scaling_factor * images
a__: Optional[int] = self.vqvae.decode(__UpperCAmelCase)['sample']
a__: Optional[Any] = (images / 2 + 0.5).clamp(0 , 1)
a__: Union[str, Any] = images.cpu().permute(0 , 2 , 3 , 1).numpy()
a__: Optional[int] = (images * 2_55).round().astype('uint8')
a__: Optional[Any] = list(
(Image.fromarray(_[:, :, 0]) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(__UpperCAmelCase , mode='RGB').convert('L') for _ in images))
a__: Optional[int] = [self.mel.image_to_audio(__UpperCAmelCase) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__UpperCAmelCase)[:, np.newaxis, :]) , **ImagePipelineOutput(__UpperCAmelCase))
@torch.no_grad()
def lowerCamelCase_ ( self , lowercase , lowercase = 50) -> Tuple:
'''simple docstring'''
assert isinstance(self.scheduler , __UpperCAmelCase)
self.scheduler.set_timesteps(__UpperCAmelCase)
a__: Dict = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8').reshape((1, image.height, image.width)) for image in images])
a__: Optional[int] = (sample / 2_55) * 2 - 1
a__: Optional[Any] = torch.Tensor(__UpperCAmelCase).to(self.device)
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,))):
a__: str = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
a__: Optional[int] = self.scheduler.alphas_cumprod[t]
a__: str = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
a__: Any = 1 - alpha_prod_t
a__: List[str] = self.unet(__UpperCAmelCase , __UpperCAmelCase)['sample']
a__: Union[str, Any] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
a__: str = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
a__: Any = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def lowerCamelCase_ ( lowercase , lowercase , lowercase) -> Tuple:
'''simple docstring'''
a__: int = acos(torch.dot(torch.flatten(__UpperCAmelCase) , torch.flatten(__UpperCAmelCase)) / torch.norm(__UpperCAmelCase) / torch.norm(__UpperCAmelCase))
return sin((1 - alpha) * theta) * xa / sin(__UpperCAmelCase) + sin(alpha * theta) * xa / sin(__UpperCAmelCase)
| 290 |
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = ["image_processor", "tokenizer"]
lowercase = "OwlViTImageProcessor"
lowercase = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __UpperCAmelCase , )
__UpperCamelCase = kwargs.pop('feature_extractor' )
__UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="max_length" , __UpperCAmelCase="np" , **__UpperCAmelCase ):
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) or (isinstance(__UpperCAmelCase , __UpperCAmelCase ) and not isinstance(text[0] , __UpperCAmelCase )):
__UpperCamelCase = [self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )]
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(text[0] , __UpperCAmelCase ):
__UpperCamelCase = []
# Maximum number of queries across batch
__UpperCamelCase = max([len(__UpperCAmelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__UpperCAmelCase ) != max_num_queries:
__UpperCamelCase = t + [' '] * (max_num_queries - len(__UpperCAmelCase ))
__UpperCamelCase = self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
encodings.append(__UpperCAmelCase )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
__UpperCamelCase = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
__UpperCamelCase = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCamelCase = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
__UpperCamelCase = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCamelCase = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
__UpperCamelCase = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCamelCase = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
__UpperCamelCase = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = input_ids
__UpperCamelCase = attention_mask
if query_images is not None:
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = self.image_processor(
__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ).pixel_values
__UpperCamelCase = query_pixel_values
if images is not None:
__UpperCamelCase = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.image_processor.post_process(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __UpperCAmelCase , )
return self.image_processor_class
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __UpperCAmelCase , )
return self.image_processor
| 316 | 0 |
def __lowerCAmelCase ( ):
return [
a * b * (1000 - a - b)
for a in range(1 , 999 )
for b in range(_snake_case , 999 )
if (a * a + b * b == (1000 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(F'{solution() = }')
| 352 |
from __future__ import annotations
from typing import TypedDict
class _snake_case ( lowercase__):
UpperCamelCase__ : str
UpperCamelCase__ : int
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise TypeError("The parameter s type must be str." )
return [s[i:] + s[:i] for i in range(len(SCREAMING_SNAKE_CASE_ ) )]
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise TypeError("The parameter s type must be str." )
if not s:
raise ValueError("The parameter s must not be empty." )
lowercase__ = all_rotations(SCREAMING_SNAKE_CASE_ )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
lowercase__ = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(SCREAMING_SNAKE_CASE_ ),
}
return response
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise TypeError("The parameter bwt_string type must be str." )
if not bwt_string:
raise ValueError("The parameter bwt_string must not be empty." )
try:
lowercase__ = int(SCREAMING_SNAKE_CASE_ )
except ValueError:
raise TypeError(
"The parameter idx_original_string type must be int or passive"
" of cast to int." )
if idx_original_string < 0:
raise ValueError("The parameter idx_original_string must not be lower than 0." )
if idx_original_string >= len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
"The parameter idx_original_string must be lower than" " len(bwt_string)." )
lowercase__ = [""] * len(SCREAMING_SNAKE_CASE_ )
for _ in range(len(SCREAMING_SNAKE_CASE_ ) ):
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
lowercase__ = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
lowercase_ = """Provide a string that I will generate its BWT transform: """
lowercase_ = input(entry_msg).strip()
lowercase_ = bwt_transform(s)
print(
F'Burrows Wheeler transform for string \'{s}\' results '
F'in \'{result["bwt_string"]}\''
)
lowercase_ = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
F'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
F'we get original string \'{original_string}\''
)
| 224 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : str, _lowerCAmelCase : List[str]=False ):
"""simple docstring"""
_a = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_a = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def A_ ( _lowerCAmelCase : Dict, _lowerCAmelCase : Dict, _lowerCAmelCase : Tuple=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_a = """"""
else:
_a = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_a = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
_a = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_a = in_proj_weight[
: config.hidden_size, :
]
_a = in_proj_bias[: config.hidden_size]
_a = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_a = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_a = in_proj_weight[
-config.hidden_size :, :
]
_a = in_proj_bias[-config.hidden_size :]
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_a = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase, _lowerCAmelCase )
def A_ ( _lowerCAmelCase : int, _lowerCAmelCase : Optional[Any], _lowerCAmelCase : int ):
"""simple docstring"""
_a = dct.pop(_lowerCAmelCase )
_a = val
def A_ ( ):
"""simple docstring"""
_a = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_a = Image.open(requests.get(_lowerCAmelCase, stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Union[str, Any]=True ):
"""simple docstring"""
_a = ViTConfig()
# patch_size
if model_name[-1] == "8":
_a = 8
# set labels if required
if not base_model:
_a = 10_00
_a = """huggingface/label-files"""
_a = """imagenet-1k-id2label.json"""
_a = json.load(open(hf_hub_download(_lowerCAmelCase, _lowerCAmelCase, repo_type='''dataset''' ), '''r''' ) )
_a = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
_a = idalabel
_a = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
_a = 3_84
_a = 15_36
_a = 12
_a = 6
# load original model from torch hub
_a = torch.hub.load('''facebookresearch/dino:main''', _lowerCAmelCase )
original_model.eval()
# load state_dict of original model, remove and rename some keys
_a = original_model.state_dict()
if base_model:
remove_classification_head_(_lowerCAmelCase )
_a = create_rename_keys(_lowerCAmelCase, base_model=_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
# load HuggingFace model
if base_model:
_a = ViTModel(_lowerCAmelCase, add_pooling_layer=_lowerCAmelCase ).eval()
else:
_a = ViTForImageClassification(_lowerCAmelCase ).eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor
_a = ViTImageProcessor()
_a = image_processor(images=prepare_img(), return_tensors='''pt''' )
_a = encoding["""pixel_values"""]
_a = model(_lowerCAmelCase )
if base_model:
_a = original_model(_lowerCAmelCase )
assert torch.allclose(_lowerCAmelCase, outputs.last_hidden_state[:, 0, :], atol=1e-1 )
else:
_a = original_model(_lowerCAmelCase )
assert logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCAmelCase, outputs.logits, atol=1e-3 )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowerCAmelCase )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
__snake_case = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model) | 320 |
from scipy.stats import pearsonr
import datasets
_lowerCamelCase ="""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_lowerCamelCase ="""
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_lowerCamelCase ="""
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class A__ ( datasets.Metric):
def UpperCamelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=False ):
if return_pvalue:
lowerCamelCase : Optional[Any] = pearsonr(__magic_name__ , __magic_name__ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(__magic_name__ , __magic_name__ )[0] )}
| 287 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_snake_case : str = logging.get_logger(__name__)
_snake_case : Optional[Any] = {
'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class A ( __lowerCAmelCase ):
lowercase_ = '''instructblip_vision_model'''
def __init__( self : Any , lowerCAmelCase_ : Union[str, Any]=14_08 , lowerCAmelCase_ : Tuple=61_44 , lowerCAmelCase_ : Optional[int]=39 , lowerCAmelCase_ : int=16 , lowerCAmelCase_ : Optional[int]=2_24 , lowerCAmelCase_ : int=14 , lowerCAmelCase_ : Tuple="gelu" , lowerCAmelCase_ : Dict=1e-6 , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Tuple=1e-10 , lowerCAmelCase_ : Optional[int]=True , **lowerCAmelCase_ : List[Any] , ) -> Any:
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_a = hidden_size
_a = intermediate_size
_a = num_hidden_layers
_a = num_attention_heads
_a = patch_size
_a = image_size
_a = initializer_range
_a = attention_dropout
_a = layer_norm_eps
_a = hidden_act
_a = qkv_bias
@classmethod
def __lowerCAmelCase ( cls : List[str] , lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : Any ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(lowerCAmelCase_ )
_a , _a = cls.get_config_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
_a = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
class A ( __lowerCAmelCase ):
lowercase_ = '''instructblip_qformer'''
def __init__( self : int , lowerCAmelCase_ : List[str]=3_05_22 , lowerCAmelCase_ : List[str]=7_68 , lowerCAmelCase_ : Union[str, Any]=12 , lowerCAmelCase_ : List[Any]=12 , lowerCAmelCase_ : str=30_72 , lowerCAmelCase_ : Optional[Any]="gelu" , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : Optional[Any]=5_12 , lowerCAmelCase_ : Tuple=0.0_2 , lowerCAmelCase_ : Tuple=1e-12 , lowerCAmelCase_ : Dict=0 , lowerCAmelCase_ : Optional[int]="absolute" , lowerCAmelCase_ : List[Any]=2 , lowerCAmelCase_ : Optional[int]=14_08 , **lowerCAmelCase_ : Any , ) -> Dict:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_act
_a = intermediate_size
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = initializer_range
_a = layer_norm_eps
_a = position_embedding_type
_a = cross_attention_frequency
_a = encoder_hidden_size
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : Optional[int] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(lowerCAmelCase_ )
_a , _a = cls.get_config_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
_a = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
class A ( __lowerCAmelCase ):
lowercase_ = '''instructblip'''
lowercase_ = True
def __init__( self : Optional[int] , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : List[str]=32 , **lowerCAmelCase_ : Tuple ) -> Tuple:
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
if vision_config is None:
_a = {}
logger.info('''vision_config is None. initializing the InstructBlipVisionConfig with default values.''' )
if qformer_config is None:
_a = {}
logger.info('''qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.''' )
if text_config is None:
_a = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
_a = InstructBlipVisionConfig(**lowerCAmelCase_ )
_a = InstructBlipQFormerConfig(**lowerCAmelCase_ )
_a = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
_a = CONFIG_MAPPING[text_model_type](**lowerCAmelCase_ )
_a = self.text_config.tie_word_embeddings
_a = self.text_config.is_encoder_decoder
_a = num_query_tokens
_a = self.vision_config.hidden_size
_a = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_a = 1.0
_a = 0.0_2
@classmethod
def __lowerCAmelCase ( cls : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , **lowerCAmelCase_ : List[str] , ) -> Any:
"""simple docstring"""
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowerCAmelCase_ , )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_a = copy.deepcopy(self.__dict__ )
_a = self.vision_config.to_dict()
_a = self.qformer_config.to_dict()
_a = self.text_config.to_dict()
_a = self.__class__.model_type
return output
| 365 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
_snake_case : Any = logging.get_logger(__name__)
_snake_case : Any = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_snake_case : List[str] = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
_snake_case : int = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
_snake_case : int = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class A ( _a ):
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_INIT_CONFIGURATION
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = BertTokenizer
def __init__( self : str , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Any="[UNK]" , lowerCAmelCase_ : Union[str, Any]="[SEP]" , lowerCAmelCase_ : Tuple="[PAD]" , lowerCAmelCase_ : Tuple="[CLS]" , lowerCAmelCase_ : Optional[int]="[MASK]" , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Union[str, Any]=None , **lowerCAmelCase_ : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , )
_a = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , lowerCAmelCase_ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCAmelCase_ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase_ ) != tokenize_chinese_chars
):
_a = getattr(lowerCAmelCase_ , normalizer_state.pop('''type''' ) )
_a = do_lower_case
_a = strip_accents
_a = tokenize_chinese_chars
_a = normalizer_class(**lowerCAmelCase_ )
_a = do_lower_case
def __lowerCAmelCase ( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int=None ) -> List[str]:
"""simple docstring"""
_a = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
_a = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
| 179 | 0 |
import os
def a__ ( __UpperCamelCase = "input.txt" ):
with open(os.path.join(os.path.dirname(__UpperCamelCase ) , __UpperCamelCase ) ) as input_file:
SCREAMING_SNAKE_CASE_ = [
[int(__UpperCamelCase ) for element in line.split("," )]
for line in input_file.readlines()
]
SCREAMING_SNAKE_CASE_ = len(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = len(matrix[0] )
SCREAMING_SNAKE_CASE_ = [[-1 for _ in range(__UpperCamelCase )] for _ in range(__UpperCamelCase )]
for i in range(__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = matrix[i][0]
for j in range(1 , __UpperCamelCase ):
for i in range(__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
SCREAMING_SNAKE_CASE_ = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f"{solution() = }")
| 118 | def a__ ( __UpperCamelCase ):
if not head:
return True
# split the list to two parts
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = head.next, head
while fast and fast.next:
SCREAMING_SNAKE_CASE_ = fast.next.next
SCREAMING_SNAKE_CASE_ = slow.next
SCREAMING_SNAKE_CASE_ = slow.next
SCREAMING_SNAKE_CASE_ = None # Don't forget here! But forget still works!
# reverse the second part
SCREAMING_SNAKE_CASE_ = None
while second:
SCREAMING_SNAKE_CASE_ = second.next
SCREAMING_SNAKE_CASE_ = node
SCREAMING_SNAKE_CASE_ = second
SCREAMING_SNAKE_CASE_ = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
SCREAMING_SNAKE_CASE_ = node.next
SCREAMING_SNAKE_CASE_ = head.next
return True
def a__ ( __UpperCamelCase ):
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
SCREAMING_SNAKE_CASE_ = SCREAMING_SNAKE_CASE_ = SCREAMING_SNAKE_CASE_ = head
while fast and fast.next:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = fast.next.next, slow.next
# 2. Push the second half into the stack
SCREAMING_SNAKE_CASE_ = [slow.val]
while slow.next:
SCREAMING_SNAKE_CASE_ = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
SCREAMING_SNAKE_CASE_ = cur.next
return True
def a__ ( __UpperCamelCase ):
if not head or not head.next:
return True
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = 0
while head:
if head.val in d:
d[head.val].append(__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE_ = [pos]
SCREAMING_SNAKE_CASE_ = head.next
pos += 1
SCREAMING_SNAKE_CASE_ = pos - 1
SCREAMING_SNAKE_CASE_ = 0
for v in d.values():
if len(__UpperCamelCase ) % 2 != 0:
middle += 1
else:
SCREAMING_SNAKE_CASE_ = 0
for i in range(0 , len(__UpperCamelCase ) ):
if v[i] + v[len(__UpperCamelCase ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
| 118 | 1 |
def A (__A : list , __A : list ) -> float:
"""simple docstring"""
_validate_point(__A )
_validate_point(__A )
if len(__A ) != len(__A ):
raise ValueError('''Both points must be in the same n-dimensional space''' )
return float(sum(abs(a - b ) for a, b in zip(__A , __A ) ) )
def A (__A : list[float] ) -> None:
"""simple docstring"""
if point:
if isinstance(__A , __A ):
for item in point:
if not isinstance(__A , (int, float) ):
UpperCAmelCase_ = (
'''Expected a list of numbers as input, found '''
F"""{type(__A ).__name__}"""
)
raise TypeError(__A )
else:
UpperCAmelCase_ = F"""Expected a list of numbers as input, found {type(__A ).__name__}"""
raise TypeError(__A )
else:
raise ValueError('''Missing an input''' )
def A (__A : list , __A : list ) -> float:
"""simple docstring"""
_validate_point(__A )
_validate_point(__A )
if len(__A ) != len(__A ):
raise ValueError('''Both points must be in the same n-dimensional space''' )
return float(sum(abs(x - y ) for x, y in zip(__A , __A ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = 10
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4]
UpperCAmelCase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
self.assertEqual(_snake_case , [])
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = ''''''
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
self.assertEqual(_snake_case , [])
self.assertEqual(_snake_case , [])
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
UpperCAmelCase_ = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(_snake_case , _snake_case)
UpperCAmelCase_ = ['''It was the best of times.''']
self.assertEqual(_snake_case , _snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([1, 2, 3, 4])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1])
np.testing.assert_array_equal(build_mask(_snake_case , 0).numpy() , expected.numpy())
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([1, 2, 3, 4, 23, 23, 23])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(_snake_case , 23).numpy() , expected.numpy())
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(_snake_case , 1).numpy() , expected.numpy())
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = 101
UpperCAmelCase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
UpperCAmelCase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
UpperCAmelCase_ = compute_token_type_ids(_snake_case , _snake_case)
np.testing.assert_array_equal(_snake_case , _snake_case)
| 7 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__snake_case = {
"""configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""],
"""configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""MaskFormerFeatureExtractor"""]
__snake_case = ["""MaskFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MaskFormerForInstanceSegmentation""",
"""MaskFormerModel""",
"""MaskFormerPreTrainedModel""",
]
__snake_case = [
"""MaskFormerSwinBackbone""",
"""MaskFormerSwinModel""",
"""MaskFormerSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 259 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase_ = {
"configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
"configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["MaskFormerFeatureExtractor"]
UpperCamelCase_ = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"MaskFormerForInstanceSegmentation",
"MaskFormerModel",
"MaskFormerPreTrainedModel",
]
UpperCamelCase_ = [
"MaskFormerSwinBackbone",
"MaskFormerSwinModel",
"MaskFormerSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 251 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowerCAmelCase (__UpperCamelCase : int , __UpperCamelCase : int ):
"""simple docstring"""
__UpperCamelCase =[]
create_all_state(1 , __UpperCamelCase , __UpperCamelCase , [] , __UpperCamelCase )
return result
def lowerCAmelCase (__UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : list[int] , __UpperCamelCase : list[list[int]] , ):
"""simple docstring"""
if level == 0:
total_list.append(current_list[:] )
return
for i in range(__UpperCamelCase , total_number - level + 2 ):
current_list.append(__UpperCamelCase )
create_all_state(i + 1 , __UpperCamelCase , level - 1 , __UpperCamelCase , __UpperCamelCase )
current_list.pop()
def lowerCAmelCase (__UpperCamelCase : list[list[int]] ):
"""simple docstring"""
for i in total_list:
print(*__UpperCamelCase )
if __name__ == "__main__":
__lowercase = 4
__lowercase = 2
__lowercase = generate_all_combinations(n, k)
print_all_state(total_list)
| 357 | """simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowercase = logging.get_logger(__name__)
__lowercase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowercase = {
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
__lowercase = {
'''gpt-neox-20b''': 2_048,
}
class _lowercase ( __a ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ['''input_ids''', '''attention_mask''']
def __init__( self : Optional[Any] , UpperCamelCase__ : str=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Optional[int]="<|endoftext|>" , UpperCamelCase__ : Optional[int]="<|endoftext|>" , UpperCamelCase__ : Optional[int]="<|endoftext|>" , UpperCamelCase__ : Optional[int]=False , **UpperCamelCase__ : str , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
__UpperCamelCase =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCamelCase__ ) != add_prefix_space:
__UpperCamelCase =getattr(UpperCamelCase__ , pre_tok_state.pop('''type''' ) )
__UpperCamelCase =add_prefix_space
__UpperCamelCase =pre_tok_class(**UpperCamelCase__ )
__UpperCamelCase =add_prefix_space
def UpperCAmelCase_ ( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
__UpperCamelCase =self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
def UpperCAmelCase_ ( self : int , UpperCamelCase__ : "Conversation" ) -> List[int]:
'''simple docstring'''
__UpperCamelCase =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
if len(UpperCamelCase__ ) > self.model_max_length:
__UpperCamelCase =input_ids[-self.model_max_length :]
return input_ids
| 85 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
A__ : Any =logging.get_logger(__name__)
# General docstring
A__ : Optional[int] ='''RegNetConfig'''
# Base docstring
A__ : Union[str, Any] ='''facebook/regnet-y-040'''
A__ : Optional[Any] =[1, 10_88, 7, 7]
# Image classification docstring
A__ : int ='''facebook/regnet-y-040'''
A__ : List[Any] ='''tabby, tabby cat'''
A__ : Any =[
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCAmelCase ( tf.keras.layers.Layer ):
def __init__( self : Dict , __snake_case : List[Any] , __snake_case : Optional[Any] = 3 , __snake_case : Optional[int] = 1 , __snake_case : str = 1 , __snake_case : Optional[Any] = "relu" , **__snake_case : Optional[Any] , ) -> Optional[Any]:
super().__init__(**__snake_case )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
_lowerCAmelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
_lowerCAmelCase = tf.keras.layers.ConvaD(
filters=__snake_case , kernel_size=__snake_case , strides=__snake_case , padding="""VALID""" , groups=__snake_case , use_bias=__snake_case , name="""convolution""" , )
_lowerCAmelCase = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="""normalization""" )
_lowerCAmelCase = ACTaFN[activation] if activation is not None else tf.identity
def lowercase__ ( self : Dict , __snake_case : Tuple ) -> str:
_lowerCAmelCase = self.convolution(self.padding(__snake_case ) )
_lowerCAmelCase = self.normalization(__snake_case )
_lowerCAmelCase = self.activation(__snake_case )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
def __init__( self : Tuple , __snake_case : Optional[Any] , **__snake_case : Optional[int] ) -> Optional[int]:
super().__init__(**__snake_case )
_lowerCAmelCase = config.num_channels
_lowerCAmelCase = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="""embedder""" , )
def lowercase__ ( self : Any , __snake_case : str ) -> Union[str, Any]:
_lowerCAmelCase = shape_list(__snake_case )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
_lowerCAmelCase = tf.transpose(__snake_case , perm=(0, 2, 3, 1) )
_lowerCAmelCase = self.embedder(__snake_case )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
def __init__( self : Dict , __snake_case : List[str] , __snake_case : int = 2 , **__snake_case : Any ) -> List[str]:
super().__init__(**__snake_case )
_lowerCAmelCase = tf.keras.layers.ConvaD(
filters=__snake_case , kernel_size=1 , strides=__snake_case , use_bias=__snake_case , name="""convolution""" )
_lowerCAmelCase = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="""normalization""" )
def lowercase__ ( self : Tuple , __snake_case : Union[str, Any] , __snake_case : List[str] = False ) -> tf.Tensor:
return self.normalization(self.convolution(__snake_case ) , training=__snake_case )
class UpperCAmelCase ( tf.keras.layers.Layer ):
def __init__( self : Tuple , __snake_case : Optional[Any] , __snake_case : Optional[int] , **__snake_case : Tuple ) -> str:
super().__init__(**__snake_case )
_lowerCAmelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__snake_case , name="""pooler""" )
_lowerCAmelCase = [
tf.keras.layers.ConvaD(filters=__snake_case , kernel_size=1 , activation="""relu""" , name="""attention.0""" ),
tf.keras.layers.ConvaD(filters=__snake_case , kernel_size=1 , activation="""sigmoid""" , name="""attention.2""" ),
]
def lowercase__ ( self : int , __snake_case : Optional[Any] ) -> Dict:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
_lowerCAmelCase = self.pooler(__snake_case )
for layer_module in self.attention:
_lowerCAmelCase = layer_module(__snake_case )
_lowerCAmelCase = hidden_state * pooled
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
def __init__( self : Any , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Tuple = 1 , **__snake_case : Optional[Any] ) -> int:
super().__init__(**__snake_case )
_lowerCAmelCase = in_channels != out_channels or stride != 1
_lowerCAmelCase = max(1 , out_channels // config.groups_width )
_lowerCAmelCase = (
TFRegNetShortCut(__snake_case , stride=__snake_case , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
_lowerCAmelCase = [
TFRegNetConvLayer(__snake_case , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
__snake_case , stride=__snake_case , groups=__snake_case , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetConvLayer(__snake_case , kernel_size=1 , activation=__snake_case , name="""layer.2""" ),
]
_lowerCAmelCase = ACTaFN[config.hidden_act]
def lowercase__ ( self : int , __snake_case : List[Any] ) -> Union[str, Any]:
_lowerCAmelCase = hidden_state
for layer_module in self.layers:
_lowerCAmelCase = layer_module(__snake_case )
_lowerCAmelCase = self.shortcut(__snake_case )
hidden_state += residual
_lowerCAmelCase = self.activation(__snake_case )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
def __init__( self : Optional[int] , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Dict = 1 , **__snake_case : List[Any] ) -> Tuple:
super().__init__(**__snake_case )
_lowerCAmelCase = in_channels != out_channels or stride != 1
_lowerCAmelCase = max(1 , out_channels // config.groups_width )
_lowerCAmelCase = (
TFRegNetShortCut(__snake_case , stride=__snake_case , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
_lowerCAmelCase = [
TFRegNetConvLayer(__snake_case , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
__snake_case , stride=__snake_case , groups=__snake_case , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetSELayer(__snake_case , reduced_channels=int(round(in_channels / 4 ) ) , name="""layer.2""" ),
TFRegNetConvLayer(__snake_case , kernel_size=1 , activation=__snake_case , name="""layer.3""" ),
]
_lowerCAmelCase = ACTaFN[config.hidden_act]
def lowercase__ ( self : Dict , __snake_case : int ) -> Optional[int]:
_lowerCAmelCase = hidden_state
for layer_module in self.layers:
_lowerCAmelCase = layer_module(__snake_case )
_lowerCAmelCase = self.shortcut(__snake_case )
hidden_state += residual
_lowerCAmelCase = self.activation(__snake_case )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
def __init__( self : Any , __snake_case : List[Any] , __snake_case : Dict , __snake_case : Optional[Any] , __snake_case : Optional[Any] = 2 , __snake_case : List[Any] = 2 , **__snake_case : Tuple ) -> int:
super().__init__(**__snake_case )
_lowerCAmelCase = TFRegNetXLayer if config.layer_type == """x""" else TFRegNetYLayer
_lowerCAmelCase = [
# downsampling is done in the first layer with stride of 2
layer(__snake_case , __snake_case , __snake_case , stride=__snake_case , name="""layers.0""" ),
*[layer(__snake_case , __snake_case , __snake_case , name=f"layers.{i+1}" ) for i in range(depth - 1 )],
]
def lowercase__ ( self : Tuple , __snake_case : Tuple ) -> Optional[Any]:
for layer_module in self.layers:
_lowerCAmelCase = layer_module(__snake_case )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , __snake_case : List[Any] , **__snake_case : Tuple ) -> Tuple:
super().__init__(**__snake_case )
_lowerCAmelCase = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
__snake_case , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="""stages.0""" , ) )
_lowerCAmelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(__snake_case , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(__snake_case , __snake_case , __snake_case , depth=__snake_case , name=f"stages.{i+1}" ) )
def lowercase__ ( self : Dict , __snake_case : Optional[Any] , __snake_case : int = False , __snake_case : int = True ) -> TFBaseModelOutputWithNoAttention:
_lowerCAmelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_lowerCAmelCase = hidden_states + (hidden_state,)
_lowerCAmelCase = stage_module(__snake_case )
if output_hidden_states:
_lowerCAmelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=__snake_case , hidden_states=__snake_case )
@keras_serializable
class UpperCAmelCase ( tf.keras.layers.Layer ):
_lowercase: str = RegNetConfig
def __init__( self : Optional[Any] , __snake_case : Any , **__snake_case : List[Any] ) -> Any:
super().__init__(**__snake_case )
_lowerCAmelCase = config
_lowerCAmelCase = TFRegNetEmbeddings(__snake_case , name="""embedder""" )
_lowerCAmelCase = TFRegNetEncoder(__snake_case , name="""encoder""" )
_lowerCAmelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__snake_case , name="""pooler""" )
@unpack_inputs
def lowercase__ ( self : int , __snake_case : List[str] , __snake_case : Any = None , __snake_case : List[Any] = None , __snake_case : List[str] = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
_lowerCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase = self.embedder(__snake_case , training=__snake_case )
_lowerCAmelCase = self.encoder(
__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case , training=__snake_case )
_lowerCAmelCase = encoder_outputs[0]
_lowerCAmelCase = self.pooler(__snake_case )
# Change to NCHW output format have uniformity in the modules
_lowerCAmelCase = tf.transpose(__snake_case , perm=(0, 3, 1, 2) )
_lowerCAmelCase = tf.transpose(__snake_case , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
_lowerCAmelCase = tuple([tf.transpose(__snake_case , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__snake_case , pooler_output=__snake_case , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class UpperCAmelCase ( snake_case_ ):
_lowercase: List[str] = RegNetConfig
_lowercase: str = '''regnet'''
_lowercase: Optional[int] = '''pixel_values'''
@property
def lowercase__ ( self : str ) -> str:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )}
A__ : Dict =r'''
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
A__ : Any =r'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''' , snake_case_ , )
class UpperCAmelCase ( snake_case_ ):
def __init__( self : List[Any] , __snake_case : List[Any] , *__snake_case : str , **__snake_case : Optional[int] ) -> Any:
super().__init__(__snake_case , *__snake_case , **__snake_case )
_lowerCAmelCase = TFRegNetMainLayer(__snake_case , name="""regnet""" )
@unpack_inputs
@add_start_docstrings_to_model_forward(__snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__snake_case , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowercase__ ( self : Dict , __snake_case : Optional[Any] , __snake_case : Optional[int] = None , __snake_case : Optional[int] = None , __snake_case : List[str]=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
_lowerCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase = self.regnet(
pixel_values=__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case , training=__snake_case , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , snake_case_ , )
class UpperCAmelCase ( snake_case_ , snake_case_ ):
def __init__( self : str , __snake_case : Tuple , *__snake_case : int , **__snake_case : Optional[int] ) -> str:
super().__init__(__snake_case , *__snake_case , **__snake_case )
_lowerCAmelCase = config.num_labels
_lowerCAmelCase = TFRegNetMainLayer(__snake_case , name="""regnet""" )
# classification head
_lowerCAmelCase = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="""classifier.1""" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(__snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowercase__ ( self : Tuple , __snake_case : Optional[int] = None , __snake_case : int = None , __snake_case : Tuple = None , __snake_case : int = None , __snake_case : int=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
_lowerCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase = self.regnet(
__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case , training=__snake_case )
_lowerCAmelCase = outputs.pooler_output if return_dict else outputs[1]
_lowerCAmelCase = self.classifier[0](__snake_case )
_lowerCAmelCase = self.classifier[1](__snake_case )
_lowerCAmelCase = None if labels is None else self.hf_compute_loss(labels=__snake_case , logits=__snake_case )
if not return_dict:
_lowerCAmelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=__snake_case , logits=__snake_case , hidden_states=outputs.hidden_states )
| 70 |
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
lowercase_ = logging.get_logger(__name__)
# General docstring
lowercase_ = """PoolFormerConfig"""
# Base docstring
lowercase_ = """sail/poolformer_s12"""
lowercase_ = [1, 512, 7, 7]
# Image classification docstring
lowercase_ = """sail/poolformer_s12"""
lowercase_ = """tabby, tabby cat"""
lowercase_ = [
"""sail/poolformer_s12""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def lowerCamelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : float = 0.0 , __lowerCamelCase : bool = False ) ->int:
if drop_prob == 0.0 or not training:
return input
_SCREAMING_SNAKE_CASE = 1 - drop_prob
_SCREAMING_SNAKE_CASE = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
_SCREAMING_SNAKE_CASE = keep_prob + torch.rand(__lowerCamelCase , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
_SCREAMING_SNAKE_CASE = input.div(__lowerCamelCase ) * random_tensor
return output
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , A = None ) -> None:
super().__init__()
_SCREAMING_SNAKE_CASE = drop_prob
def snake_case_( self , A ) -> torch.Tensor:
return drop_path(A , self.drop_prob , self.training )
def snake_case_( self ) -> str:
return "p={}".format(self.drop_prob )
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , A , A , A , A , A , A=None ) -> Union[str, Any]:
super().__init__()
_SCREAMING_SNAKE_CASE = patch_size if isinstance(A , collections.abc.Iterable ) else (patch_size, patch_size)
_SCREAMING_SNAKE_CASE = stride if isinstance(A , collections.abc.Iterable ) else (stride, stride)
_SCREAMING_SNAKE_CASE = padding if isinstance(A , collections.abc.Iterable ) else (padding, padding)
_SCREAMING_SNAKE_CASE = nn.Convad(A , A , kernel_size=A , stride=A , padding=A )
_SCREAMING_SNAKE_CASE = norm_layer(A ) if norm_layer else nn.Identity()
def snake_case_( self , A ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = self.projection(A )
_SCREAMING_SNAKE_CASE = self.norm(A )
return embeddings
class a_ ( nn.GroupNorm ):
'''simple docstring'''
def __init__( self , A , **A ) -> Union[str, Any]:
super().__init__(1 , A , **A )
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , A ) -> Union[str, Any]:
super().__init__()
_SCREAMING_SNAKE_CASE = nn.AvgPoolad(A , stride=1 , padding=pool_size // 2 , count_include_pad=A )
def snake_case_( self , A ) -> Union[str, Any]:
return self.pool(A ) - hidden_states
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , A , A , A , A ) -> List[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE = nn.Convad(A , A , 1 )
_SCREAMING_SNAKE_CASE = nn.Convad(A , A , 1 )
_SCREAMING_SNAKE_CASE = PoolFormerDropPath(A )
if isinstance(config.hidden_act , A ):
_SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act]
else:
_SCREAMING_SNAKE_CASE = config.hidden_act
def snake_case_( self , A ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = self.conva(A )
_SCREAMING_SNAKE_CASE = self.act_fn(A )
_SCREAMING_SNAKE_CASE = self.drop(A )
_SCREAMING_SNAKE_CASE = self.conva(A )
_SCREAMING_SNAKE_CASE = self.drop(A )
return hidden_states
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , A , A , A , A , A , A ) -> Union[str, Any]:
super().__init__()
_SCREAMING_SNAKE_CASE = PoolFormerPooling(A )
_SCREAMING_SNAKE_CASE = PoolFormerOutput(A , A , A , A )
_SCREAMING_SNAKE_CASE = PoolFormerGroupNorm(A )
_SCREAMING_SNAKE_CASE = PoolFormerGroupNorm(A )
# Useful for training neural nets
_SCREAMING_SNAKE_CASE = PoolFormerDropPath(A ) if drop_path > 0.0 else nn.Identity()
_SCREAMING_SNAKE_CASE = config.use_layer_scale
if config.use_layer_scale:
_SCREAMING_SNAKE_CASE = nn.Parameter(
config.layer_scale_init_value * torch.ones((A) ) , requires_grad=A )
_SCREAMING_SNAKE_CASE = nn.Parameter(
config.layer_scale_init_value * torch.ones((A) ) , requires_grad=A )
def snake_case_( self , A ) -> Optional[Any]:
if self.use_layer_scale:
_SCREAMING_SNAKE_CASE = self.pooling(self.before_norm(A ) )
_SCREAMING_SNAKE_CASE = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
_SCREAMING_SNAKE_CASE = hidden_states + self.drop_path(A )
_SCREAMING_SNAKE_CASE = ()
_SCREAMING_SNAKE_CASE = self.output(self.after_norm(A ) )
_SCREAMING_SNAKE_CASE = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
_SCREAMING_SNAKE_CASE = hidden_states + self.drop_path(A )
_SCREAMING_SNAKE_CASE = (output,) + outputs
return outputs
else:
_SCREAMING_SNAKE_CASE = self.drop_path(self.pooling(self.before_norm(A ) ) )
# First residual connection
_SCREAMING_SNAKE_CASE = pooling_output + hidden_states
_SCREAMING_SNAKE_CASE = ()
# Second residual connection inside the PoolFormerOutput block
_SCREAMING_SNAKE_CASE = self.drop_path(self.output(self.after_norm(A ) ) )
_SCREAMING_SNAKE_CASE = hidden_states + layer_output
_SCREAMING_SNAKE_CASE = (output,) + outputs
return outputs
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , A ) -> Any:
super().__init__()
_SCREAMING_SNAKE_CASE = config
# stochastic depth decay rule
_SCREAMING_SNAKE_CASE = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
_SCREAMING_SNAKE_CASE = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
_SCREAMING_SNAKE_CASE = nn.ModuleList(A )
# Transformer blocks
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
_SCREAMING_SNAKE_CASE = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
A , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(A ) )
_SCREAMING_SNAKE_CASE = nn.ModuleList(A )
def snake_case_( self , A , A=False , A=True ) -> List[Any]:
_SCREAMING_SNAKE_CASE = () if output_hidden_states else None
_SCREAMING_SNAKE_CASE = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = layers
# Get patch embeddings from hidden_states
_SCREAMING_SNAKE_CASE = embedding_layer(A )
# Send the embeddings through the blocks
for _, blk in enumerate(A ):
_SCREAMING_SNAKE_CASE = blk(A )
_SCREAMING_SNAKE_CASE = layer_outputs[0]
if output_hidden_states:
_SCREAMING_SNAKE_CASE = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=A , hidden_states=A )
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = PoolFormerConfig
UpperCamelCase = '''poolformer'''
UpperCamelCase = '''pixel_values'''
UpperCamelCase = True
def snake_case_( self , A ) -> int:
if isinstance(A , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(A , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def snake_case_( self , A , A=False ) -> Dict:
if isinstance(A , A ):
_SCREAMING_SNAKE_CASE = value
lowercase_ = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowercase_ = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
'''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , snake_case_ , )
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__( self , A ) -> int:
super().__init__(A )
_SCREAMING_SNAKE_CASE = config
_SCREAMING_SNAKE_CASE = PoolFormerEncoder(A )
# Initialize weights and apply final processing
self.post_init()
def snake_case_( self ) -> Any:
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=A , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def snake_case_( self , A = None , A = None , A = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
_SCREAMING_SNAKE_CASE = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
_SCREAMING_SNAKE_CASE = self.encoder(
A , output_hidden_states=A , return_dict=A , )
_SCREAMING_SNAKE_CASE = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=A , hidden_states=encoder_outputs.hidden_states , )
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , A ) -> Dict:
super().__init__()
_SCREAMING_SNAKE_CASE = nn.Linear(config.hidden_size , config.hidden_size )
def snake_case_( self , A ) -> str:
_SCREAMING_SNAKE_CASE = self.dense(A )
return output
@add_start_docstrings(
'''
PoolFormer Model transformer with an image classification head on top
''' , snake_case_ , )
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__( self , A ) -> Optional[Any]:
super().__init__(A )
_SCREAMING_SNAKE_CASE = config.num_labels
_SCREAMING_SNAKE_CASE = PoolFormerModel(A )
# Final norm
_SCREAMING_SNAKE_CASE = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
_SCREAMING_SNAKE_CASE = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def snake_case_( self , A = None , A = None , A = None , A = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
_SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
_SCREAMING_SNAKE_CASE = self.poolformer(
A , output_hidden_states=A , return_dict=A , )
_SCREAMING_SNAKE_CASE = outputs[0]
_SCREAMING_SNAKE_CASE = self.classifier(self.norm(A ).mean([-2, -1] ) )
_SCREAMING_SNAKE_CASE = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_SCREAMING_SNAKE_CASE = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_SCREAMING_SNAKE_CASE = """single_label_classification"""
else:
_SCREAMING_SNAKE_CASE = """multi_label_classification"""
if self.config.problem_type == "regression":
_SCREAMING_SNAKE_CASE = MSELoss()
if self.num_labels == 1:
_SCREAMING_SNAKE_CASE = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_SCREAMING_SNAKE_CASE = loss_fct(A , A )
elif self.config.problem_type == "single_label_classification":
_SCREAMING_SNAKE_CASE = CrossEntropyLoss()
_SCREAMING_SNAKE_CASE = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_SCREAMING_SNAKE_CASE = BCEWithLogitsLoss()
_SCREAMING_SNAKE_CASE = loss_fct(A , A )
if not return_dict:
_SCREAMING_SNAKE_CASE = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=A , logits=A , hidden_states=outputs.hidden_states )
| 58 | 0 |
"""simple docstring"""
def _lowercase ( __lowerCAmelCase ) -> Dict:
SCREAMING_SNAKE_CASE__ : Dict = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : int = {
"""^""": 3,
"""*""": 2,
"""/""": 2,
"""%""": 2,
"""+""": 1,
"""-""": 1,
} # Priority of each operator
SCREAMING_SNAKE_CASE__ : List[Any] = len(__lowerCAmelCase ) if (len(__lowerCAmelCase ) > 7) else 7
# Print table header for output
print(
"""Symbol""".center(8 ) , """Stack""".center(__lowerCAmelCase ) , """Postfix""".center(__lowerCAmelCase ) , sep=""" | """ , )
print("""-""" * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(__lowerCAmelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(__lowerCAmelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(__lowerCAmelCase ) == 0:
stack.append(__lowerCAmelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(__lowerCAmelCase ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(__lowerCAmelCase ) # push x to stack
print(
x.center(8 ) , ("""""".join(__lowerCAmelCase )).ljust(__lowerCAmelCase ) , ("""""".join(__lowerCAmelCase )).ljust(__lowerCAmelCase ) , sep=""" | """ , ) # Output in tabular format
while len(__lowerCAmelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
""" """.center(8 ) , ("""""".join(__lowerCAmelCase )).ljust(__lowerCAmelCase ) , ("""""".join(__lowerCAmelCase )).ljust(__lowerCAmelCase ) , sep=""" | """ , ) # Output in tabular format
return "".join(__lowerCAmelCase ) # return Postfix as str
def _lowercase ( __lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__ : List[str] = list(infix[::-1] ) # reverse the infix equation
for i in range(len(__lowerCAmelCase ) ):
if infix[i] == "(":
SCREAMING_SNAKE_CASE__ : Optional[int] = """)""" # change "(" to ")"
elif infix[i] == ")":
SCREAMING_SNAKE_CASE__ : Optional[Any] = """(""" # change ")" to "("
return (infix_2_postfix("""""".join(__lowerCAmelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
a :Optional[int] = input("\nEnter an Infix Equation = ") # Input an Infix equation
a :Dict = "".join(Infix.split()) # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 56 |
"""simple docstring"""
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
while b:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = b, a % b
return a
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
return a if b == 0 else euclidean_gcd_recursive(__lowerCAmelCase , a % b )
def _lowercase ( ) -> Union[str, Any]:
print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
| 56 | 1 |
"""simple docstring"""
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : List[str] = len(_lowerCAmelCase ) + 1
lowercase__ : Any = len(_lowerCAmelCase ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
lowercase__ : List[str] = [[0 for i in range(_lowerCAmelCase )] for j in range(_lowerCAmelCase )]
# since string of zero length match pattern of zero length
lowercase__ : Any = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , _lowerCAmelCase ):
lowercase__ : Tuple = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , _lowerCAmelCase ):
lowercase__ : Union[str, Any] = dp[0][j - 2] if pattern[j - 1] == '*' else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , _lowerCAmelCase ):
for j in range(1 , _lowerCAmelCase ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
lowercase__ : List[Any] = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
lowercase__ : Union[str, Any] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
lowercase__ : Tuple = dp[i - 1][j]
else:
lowercase__ : Tuple = 0
else:
lowercase__ : List[Any] = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
_UpperCamelCase : Any = "aab"
_UpperCamelCase : int = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
| 77 |
'''simple docstring'''
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return number | (1 << position)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return number & ~(1 << position)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return number ^ (1 << position)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return ((number >> position) & 1) == 1
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE__ :
def __init__( self , A_ = 6 )-> None:
'''simple docstring'''
UpperCamelCase = None
UpperCamelCase = None
self.create_linked_list(A_ )
def UpperCAmelCase_ ( self , A_ )-> None:
'''simple docstring'''
UpperCamelCase = Node()
UpperCamelCase = current_node
UpperCamelCase = current_node
UpperCamelCase = current_node
for _ in range(1 , A_ ):
UpperCamelCase = Node()
UpperCamelCase = current_node
UpperCamelCase = previous_node
UpperCamelCase = current_node
UpperCamelCase = self.front
UpperCamelCase = previous_node
def UpperCAmelCase_ ( self )-> bool:
'''simple docstring'''
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def UpperCAmelCase_ ( self )-> Any | None:
'''simple docstring'''
self.check_can_perform_operation()
return self.front.data if self.front else None
def UpperCAmelCase_ ( self , A_ )-> None:
'''simple docstring'''
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
UpperCamelCase = self.rear.next
if self.rear:
UpperCamelCase = data
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
UpperCamelCase = self.front.data
UpperCamelCase = None
return data
UpperCamelCase = self.front
UpperCamelCase = old_front.next
UpperCamelCase = old_front.data
UpperCamelCase = None
return data
def UpperCAmelCase_ ( self )-> None:
'''simple docstring'''
if self.is_empty():
raise Exception('Empty Queue' )
def UpperCAmelCase_ ( self )-> None:
'''simple docstring'''
if self.rear and self.rear.next == self.front:
raise Exception('Full Queue' )
class SCREAMING_SNAKE_CASE__ :
def __init__( self )-> None:
'''simple docstring'''
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
if __name__ == "__main__":
import doctest
doctest.testmod()
| 251 |
'''simple docstring'''
from ... import PretrainedConfig
lowerCAmelCase : List[str] = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
lowerCAmelCase_ = """nezha"""
def __init__( self , A_=21128 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=64 , A_=2 , A_=0.02 , A_=1e-12 , A_=0.1 , A_=0 , A_=2 , A_=3 , A_=True , **A_ , )-> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_act
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = max_relative_position
UpperCamelCase = type_vocab_size
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = classifier_dropout
UpperCamelCase = use_cache
| 251 | 1 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def lowerCAmelCase_ ( snake_case_ ):
return "".join(sorted(_snake_case ) )
def lowerCAmelCase_ ( snake_case_ ):
return word_by_signature[signature(_snake_case )]
_snake_case = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
_snake_case = sorted({word.strip().lower() for word in data.splitlines()})
_snake_case = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
_snake_case = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("anagrams.txt", "w") as file:
file.write("all_anagrams = \n ")
file.write(pprint.pformat(all_anagrams))
| 26 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
a = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
a = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
a = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ ( datasets.Metric ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Optional[int] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int]=False ):
_A = spearmanr(_UpperCAmelCase , _UpperCAmelCase )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 315 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowercase : Optional[int] = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class A__ ( __a ):
"""simple docstring"""
__A : Tuple = '''albert'''
def __init__( self , lowercase=3_0000 , lowercase=128 , lowercase=4096 , lowercase=12 , lowercase=1 , lowercase=64 , lowercase=1_6384 , lowercase=1 , lowercase="gelu_new" , lowercase=0 , lowercase=0 , lowercase=512 , lowercase=2 , lowercase=0.02 , lowercase=1e-12 , lowercase=0.1 , lowercase="absolute" , lowercase=0 , lowercase=2 , lowercase=3 , **lowercase , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__)
a__ : Optional[int] = vocab_size
a__ : str = embedding_size
a__ : List[str] = hidden_size
a__ : Optional[Any] = num_hidden_layers
a__ : Optional[Any] = num_hidden_groups
a__ : Any = num_attention_heads
a__ : List[Any] = inner_group_num
a__ : str = hidden_act
a__ : Union[str, Any] = intermediate_size
a__ : Optional[int] = hidden_dropout_prob
a__ : Optional[int] = attention_probs_dropout_prob
a__ : List[Any] = max_position_embeddings
a__ : Union[str, Any] = type_vocab_size
a__ : Tuple = initializer_range
a__ : Optional[int] = layer_norm_eps
a__ : str = classifier_dropout_prob
a__ : int = position_embedding_type
class A__ ( __a ):
"""simple docstring"""
@property
def __lowercase ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
a__ : str = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a__ : Any = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
])
| 369 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowercase : Union[str, Any] = logging.getLogger(__name__)
@dataclass
class A__ :
"""simple docstring"""
__A : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
__A : Optional[str] = field(
default=__UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__A : Optional[str] = field(
default='''NER''' , metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} )
__A : Optional[str] = field(
default=__UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__A : bool = field(default=__UpperCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__A : Optional[str] = field(
default=__UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class A__ :
"""simple docstring"""
__A : str = field(
metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} )
__A : Optional[str] = field(
default=__UpperCAmelCase , metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''} , )
__A : int = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A : bool = field(
default=__UpperCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def A_ ( ) -> Dict:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
a__ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a__ , a__ , a__ : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a__ , a__ , a__ : List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
a__ : Optional[Any] = import_module('tasks' )
try:
a__ : List[Any] = getattr(A__ , model_args.task_type )
a__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
F'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , A__ )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
a__ : Tuple = token_classification_task.get_labels(data_args.labels )
a__ : Dict[int, str] = dict(enumerate(A__ ) )
a__ : Union[str, Any] = len(A__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a__ : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A__ , idalabel=A__ , labelaid={label: i for i, label in enumerate(A__ )} , cache_dir=model_args.cache_dir , )
a__ : str = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
a__ : List[Any] = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=A__ , cache_dir=model_args.cache_dir , )
# Get datasets
a__ : int = (
TokenClassificationDataset(
token_classification_task=A__ , data_dir=data_args.data_dir , tokenizer=A__ , labels=A__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
a__ : Optional[int] = (
TokenClassificationDataset(
token_classification_task=A__ , data_dir=data_args.data_dir , tokenizer=A__ , labels=A__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(A__ , A__ ) -> Tuple[List[int], List[int]]:
a__ : Union[str, Any] = np.argmax(A__ , axis=2 )
a__ , a__ : Dict = preds.shape
a__ : Union[str, Any] = [[] for _ in range(A__ )]
a__ : Optional[int] = [[] for _ in range(A__ )]
for i in range(A__ ):
for j in range(A__ ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(A__ ) -> Dict:
a__ , a__ : Union[str, Any] = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(A__ , A__ ),
"precision": precision_score(A__ , A__ ),
"recall": recall_score(A__ , A__ ),
"f1": fa_score(A__ , A__ ),
}
# Data collator
a__ : Union[str, Any] = DataCollatorWithPadding(A__ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
a__ : List[str] = Trainer(
model=A__ , args=A__ , train_dataset=A__ , eval_dataset=A__ , compute_metrics=A__ , data_collator=A__ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
a__ : Any = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
a__ : Optional[Any] = trainer.evaluate()
a__ : List[Any] = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_process_zero():
with open(A__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , A__ , A__ )
writer.write('%s = %s\n' % (key, value) )
results.update(A__ )
# Predict
if training_args.do_predict:
a__ : Optional[Any] = TokenClassificationDataset(
token_classification_task=A__ , data_dir=data_args.data_dir , tokenizer=A__ , labels=A__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
a__ , a__ , a__ : Any = trainer.predict(A__ )
a__ , a__ : Union[str, Any] = align_predictions(A__ , A__ )
a__ : Optional[int] = os.path.join(training_args.output_dir , 'test_results.txt' )
if trainer.is_world_process_zero():
with open(A__ , 'w' ) as writer:
for key, value in metrics.items():
logger.info(' %s = %s' , A__ , A__ )
writer.write('%s = %s\n' % (key, value) )
# Save predictions
a__ : Tuple = os.path.join(training_args.output_dir , 'test_predictions.txt' )
if trainer.is_world_process_zero():
with open(A__ , 'w' ) as writer:
with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
token_classification_task.write_predictions_to_file(A__ , A__ , A__ )
return results
def A_ ( A__ ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 225 | 0 |
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ):
__lowerCAmelCase = PhobertTokenizer
__lowerCAmelCase = False
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase = ["""T@@""", """i""", """I""", """R@@""", """r""", """e@@"""]
UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
UpperCamelCase = ["""#version: 0.2""", """l à</w>"""]
UpperCamelCase = {"""unk_token""": """<unk>"""}
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
for token in vocab_tokens:
fp.write(f"""{token} {vocab_tokens[token]}\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase_ ) )
def lowerCamelCase_ ( self : str , **lowerCamelCase_ : List[str] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : str ):
"""simple docstring"""
UpperCamelCase = """Tôi là VinAI Research"""
UpperCamelCase = """T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"""
return input_text, output_text
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCamelCase = """Tôi là VinAI Research"""
UpperCamelCase = """T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h""".split()
UpperCamelCase = tokenizer.tokenize(lowerCamelCase_ )
print(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase = tokens + [tokenizer.unk_token]
UpperCamelCase = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , lowerCamelCase_ )
| 343 | from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
pass
class SCREAMING_SNAKE_CASE_ :
def __init__( self : List[Any] , lowerCamelCase_ : Any ):
"""simple docstring"""
UpperCamelCase = data
UpperCamelCase = None
def __iter__( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = self
UpperCamelCase = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(lowerCamelCase_ )
yield node.data
UpperCamelCase = node.next_node
@property
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = Node(1)
_SCREAMING_SNAKE_CASE = Node(2)
_SCREAMING_SNAKE_CASE = Node(3)
_SCREAMING_SNAKE_CASE = Node(4)
print(root_node.has_loop) # False
_SCREAMING_SNAKE_CASE = root_node.next_node
print(root_node.has_loop) # True
_SCREAMING_SNAKE_CASE = Node(5)
_SCREAMING_SNAKE_CASE = Node(6)
_SCREAMING_SNAKE_CASE = Node(5)
_SCREAMING_SNAKE_CASE = Node(6)
print(root_node.has_loop) # False
_SCREAMING_SNAKE_CASE = Node(1)
print(root_node.has_loop) # False
| 343 | 1 |
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : bool = True , snake_case__ : float = math.inf , snake_case__ : float = -math.inf , snake_case__ : float = math.inf , snake_case__ : float = -math.inf , snake_case__ : bool = False , snake_case__ : float = 1_00 , snake_case__ : float = 0.01 , snake_case__ : float = 1 , ):
"""simple docstring"""
_snake_case : Dict = False
_snake_case : int = search_prob
_snake_case : Dict = start_temperate
_snake_case : int = []
_snake_case : str = 0
_snake_case : Tuple = None
while not search_end:
_snake_case : List[str] = current_state.score()
if best_state is None or current_score > best_state.score():
_snake_case : Tuple = current_state
scores.append(snake_case__ )
iterations += 1
_snake_case : Tuple = None
_snake_case : str = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
_snake_case : Union[str, Any] = random.randint(0 , len(snake_case__ ) - 1 ) # picking a random neighbor
_snake_case : int = neighbors.pop(snake_case__ )
_snake_case : Tuple = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_snake_case : List[Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_snake_case : Optional[int] = picked_neighbor
else:
_snake_case : str = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_snake_case : int = picked_neighbor
_snake_case : List[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_snake_case : List[str] = True
else:
_snake_case : List[Any] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(snake_case__ ) , snake_case__ )
plt.xlabel("""Iterations""" )
plt.ylabel("""Function values""" )
plt.show()
return best_state
if __name__ == "__main__":
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Dict ):
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
A_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
A_ = simulated_annealing(
prob, find_max=False, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
F'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
A_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
A_ = simulated_annealing(
prob, find_max=True, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
F'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
)
def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : Optional[Any] ):
"""simple docstring"""
return (3 * x**2) - (6 * y)
A_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
A_ = simulated_annealing(prob, find_max=False, visualization=True)
print(
'''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
F'''{local_min.score()}'''
)
A_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
A_ = simulated_annealing(prob, find_max=True, visualization=True)
print(
'''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
F'''{local_min.score()}'''
)
| 356 |
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class lowercase( nn.Module ):
'''simple docstring'''
def __init__( self: Optional[int] ):
'''simple docstring'''
super().__init__()
_snake_case : List[str] = nn.Linear(3, 4 )
_snake_case : int = nn.BatchNormad(4 )
_snake_case : List[str] = nn.Linear(4, 5 )
def UpperCamelCase_ ( self: Any, a_: Union[str, Any] ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(a_ ) ) )
class lowercase( __a ):
'''simple docstring'''
def UpperCamelCase_ ( self: Any, a_: int, *a_: Dict, **a_: Dict ):
'''simple docstring'''
return (args[0] + 1,) + args[1:], kwargs
class lowercase( __a ):
'''simple docstring'''
def UpperCamelCase_ ( self: str, a_: Tuple, a_: Union[str, Any] ):
'''simple docstring'''
return output + 1
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = ModelForTest()
_snake_case : List[str] = ModelHook()
add_hook_to_module(a_, a_ )
self.assertEqual(test_model._hf_hook, a_ )
self.assertTrue(hasattr(a_, """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__, """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ), ["""x"""] )
remove_hook_from_module(a_ )
self.assertFalse(hasattr(a_, """_hf_hook""" ) )
self.assertFalse(hasattr(a_, """_old_forward""" ) )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Dict = ModelForTest()
_snake_case : List[Any] = ModelHook()
add_hook_to_module(a_, a_ )
add_hook_to_module(a_, a_, append=a_ )
self.assertEqual(isinstance(test_model._hf_hook, a_ ), a_ )
self.assertEqual(len(test_model._hf_hook.hooks ), 2 )
self.assertTrue(hasattr(a_, """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__, """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ), ["""x"""] )
remove_hook_from_module(a_ )
self.assertFalse(hasattr(a_, """_hf_hook""" ) )
self.assertFalse(hasattr(a_, """_old_forward""" ) )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Optional[Any] = ModelForTest()
_snake_case : Optional[Any] = torch.randn(2, 3 )
_snake_case : List[Any] = test_model(x + 1 )
_snake_case : List[str] = test_model(x + 2 )
_snake_case : Any = PreForwardHook()
add_hook_to_module(a_, a_ )
_snake_case : List[Any] = test_model(a_ )
self.assertTrue(torch.allclose(a_, a_, atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_snake_case : List[str] = PreForwardHook()
add_hook_to_module(a_, a_ )
_snake_case : Tuple = test_model(a_ )
self.assertTrue(torch.allclose(a_, a_, atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_snake_case : str = SequentialHook(PreForwardHook(), PreForwardHook() )
add_hook_to_module(a_, a_ )
_snake_case : str = test_model(a_ )
assert torch.allclose(a_, a_, atol=1E-5 )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Optional[int] = ModelForTest()
_snake_case : List[Any] = torch.randn(2, 3 )
_snake_case : List[str] = test_model(a_ )
_snake_case : List[Any] = PostForwardHook()
add_hook_to_module(a_, a_ )
_snake_case : Union[str, Any] = test_model(a_ )
self.assertTrue(torch.allclose(a_, output + 1, atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_snake_case : Tuple = PostForwardHook()
add_hook_to_module(a_, a_ )
_snake_case : Optional[Any] = test_model(a_ )
self.assertTrue(torch.allclose(a_, output + 1, atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_snake_case : Dict = SequentialHook(PostForwardHook(), PostForwardHook() )
add_hook_to_module(a_, a_ )
_snake_case : List[str] = test_model(a_ )
assert torch.allclose(a_, output + 2, atol=1E-5 )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : str = ModelForTest()
_snake_case : Any = torch.randn(2, 3 )
_snake_case : List[str] = test_model(a_ )
_snake_case : List[Any] = PostForwardHook()
add_hook_to_module(a_, a_ )
_snake_case : Dict = test_model(a_ )
self.assertTrue(torch.allclose(a_, output + 1 ) )
self.assertTrue(outputa.requires_grad )
_snake_case : Union[str, Any] = True
_snake_case : Dict = test_model(a_ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Dict = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara, AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara, AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device, torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device, torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device, torch.device(0 ) )
self.assertEqual(model.lineara.weight.device, torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_snake_case : Any = torch.randn(2, 3 )
_snake_case : Any = model(a_ )
self.assertEqual(output.device, torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(a_, AlignDevicesHook(io_same_device=a_ ) )
_snake_case : int = torch.randn(2, 3 ).to(0 )
_snake_case : Optional[Any] = model(a_ )
self.assertEqual(output.device, torch.device(0 ) )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# This will move each submodule on different devices
_snake_case : Optional[int] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara, AlignDevicesHook(**a_ ) )
add_hook_to_module(model.batchnorm, AlignDevicesHook(**a_ ) )
add_hook_to_module(model.lineara, AlignDevicesHook(**a_ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_snake_case : Union[str, Any] = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device, a_ )
_snake_case : str = torch.randn(2, 3 )
_snake_case : Optional[int] = model(a_ )
self.assertEqual(output.device, a_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# Now test with buffers included in the offload
_snake_case : Any = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara, AlignDevicesHook(**a_ ) )
add_hook_to_module(model.batchnorm, AlignDevicesHook(**a_ ) )
add_hook_to_module(model.lineara, AlignDevicesHook(**a_ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device, torch.device("""meta""" ) )
_snake_case : List[Any] = torch.randn(2, 3 )
_snake_case : Any = model(a_ )
self.assertEqual(output.device, a_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# This will move each submodule on different devices
_snake_case : Tuple = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(a_, execution_device=a_, offload=a_ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_snake_case : Optional[int] = torch.device(a_ )
self.assertEqual(model.batchnorm.running_mean.device, a_ )
_snake_case : List[Any] = torch.randn(2, 3 )
_snake_case : List[Any] = model(a_ )
self.assertEqual(output.device, a_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a_ )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(a_, execution_device=a_, offload=a_, offload_buffers=a_ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device, torch.device("""meta""" ) )
_snake_case : str = torch.randn(2, 3 )
_snake_case : List[Any] = model(a_ )
self.assertEqual(output.device, a_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a_ )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : List[str] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# This will move each submodule on different devices
_snake_case : Any = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
a_, execution_device=a_, offload=a_, weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_snake_case : Optional[Any] = torch.device(a_ )
self.assertEqual(model.batchnorm.running_mean.device, a_ )
_snake_case : int = torch.randn(2, 3 )
_snake_case : str = model(a_ )
self.assertEqual(output.device, a_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a_ )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
a_, execution_device=a_, offload=a_, weights_map=model.state_dict(), offload_buffers=a_, )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device, torch.device("""meta""" ) )
_snake_case : Optional[int] = torch.randn(2, 3 )
_snake_case : Any = model(a_ )
self.assertEqual(output.device, a_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a_ )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
| 132 | 0 |
"""simple docstring"""
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
if upper_limit < 0:
raise ValueError("Limit for the Catalan sequence must be ≥ 0" )
__SCREAMING_SNAKE_CASE = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
__SCREAMING_SNAKE_CASE = 1
if upper_limit > 0:
__SCREAMING_SNAKE_CASE = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(lowerCAmelCase_ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
a__ : List[str] = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F"The Catalan numbers from 0 through {N} are:")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod()
| 54 |
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : List[str] = logging.get_logger(__name__)
def snake_case_ ( lowerCAmelCase_ : List[Any] ):
__lowercase : Optional[Any] = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
__lowercase : Union[str, Any] = 128
elif "12-12" in model_name:
__lowercase : Tuple = 12
__lowercase : List[Any] = 12
elif "14-14" in model_name:
__lowercase : int = 14
__lowercase : Dict = 14
elif "16-16" in model_name:
__lowercase : str = 16
__lowercase : Dict = 16
else:
raise ValueError("""Model not supported""" )
__lowercase : Union[str, Any] = """huggingface/label-files"""
if "speech-commands" in model_name:
__lowercase : List[Any] = 35
__lowercase : str = """speech-commands-v2-id2label.json"""
else:
__lowercase : Any = 527
__lowercase : Dict = """audioset-id2label.json"""
__lowercase : Optional[Any] = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="""dataset""" ) , """r""" ) )
__lowercase : List[str] = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
__lowercase : Union[str, Any] = idalabel
__lowercase : List[str] = {v: k for k, v in idalabel.items()}
return config
def snake_case_ ( lowerCAmelCase_ : Union[str, Any] ):
if "module.v" in name:
__lowercase : List[str] = name.replace("""module.v""" , """audio_spectrogram_transformer""" )
if "cls_token" in name:
__lowercase : int = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "dist_token" in name:
__lowercase : str = name.replace("""dist_token""" , """embeddings.distillation_token""" )
if "pos_embed" in name:
__lowercase : Any = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
__lowercase : Optional[Any] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
# transformer blocks
if "blocks" in name:
__lowercase : Union[str, Any] = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
__lowercase : int = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
__lowercase : Union[str, Any] = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
__lowercase : Dict = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowercase : Tuple = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__lowercase : Any = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowercase : Optional[Any] = name.replace("""mlp.fc2""" , """output.dense""" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
__lowercase : Tuple = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" )
# classifier head
if "module.mlp_head.0" in name:
__lowercase : Optional[Any] = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" )
if "module.mlp_head.1" in name:
__lowercase : List[str] = name.replace("""module.mlp_head.1""" , """classifier.dense""" )
return name
def snake_case_ ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] ):
for key in orig_state_dict.copy().keys():
__lowercase : str = orig_state_dict.pop(lowerCAmelCase_ )
if "qkv" in key:
__lowercase : Optional[int] = key.split(""".""" )
__lowercase : Optional[int] = int(key_split[3] )
__lowercase : Any = config.hidden_size
if "weight" in key:
__lowercase : Union[str, Any] = val[:dim, :]
__lowercase : Union[str, Any] = val[dim : dim * 2, :]
__lowercase : Dict = val[-dim:, :]
else:
__lowercase : Optional[int] = val[:dim]
__lowercase : Any = val[dim : dim * 2]
__lowercase : int = val[-dim:]
else:
__lowercase : Any = val
return orig_state_dict
def snake_case_ ( lowerCAmelCase_ : Optional[int] ):
__lowercase : int = [
"""module.v.head.weight""",
"""module.v.head.bias""",
"""module.v.head_dist.weight""",
"""module.v.head_dist.bias""",
]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase_ , lowerCAmelCase_ )
@torch.no_grad()
def snake_case_ ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int=False ):
__lowercase : List[Any] = get_audio_spectrogram_transformer_config(lowerCAmelCase_ )
__lowercase : Tuple = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
# load original state_dict
__lowercase : str = model_name_to_url[model_name]
__lowercase : Dict = torch.hub.load_state_dict_from_url(lowerCAmelCase_ , map_location="""cpu""" )
# remove some keys
remove_keys(lowerCAmelCase_ )
# rename some keys
__lowercase : Optional[Any] = convert_state_dict(lowerCAmelCase_ , lowerCAmelCase_ )
# load 🤗 model
__lowercase : str = ASTForAudioClassification(lowerCAmelCase_ )
model.eval()
model.load_state_dict(lowerCAmelCase_ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
__lowercase : Tuple = -4.2_677_393 if """speech-commands""" not in model_name else -6.845_978
__lowercase : Optional[int] = 4.5_689_974 if """speech-commands""" not in model_name else 5.5_654_526
__lowercase : Union[str, Any] = 1024 if """speech-commands""" not in model_name else 128
__lowercase : Any = ASTFeatureExtractor(mean=lowerCAmelCase_ , std=lowerCAmelCase_ , max_length=lowerCAmelCase_ )
if "speech-commands" in model_name:
__lowercase : Optional[int] = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" )
__lowercase : Union[str, Any] = dataset[0]["""audio"""]["""array"""]
else:
__lowercase : List[Any] = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , )
__lowercase , __lowercase : Union[str, Any] = torchaudio.load(lowerCAmelCase_ )
__lowercase : Union[str, Any] = waveform.squeeze().numpy()
__lowercase : int = feature_extractor(lowerCAmelCase_ , sampling_rate=16000 , return_tensors="""pt""" )
# forward pass
__lowercase : Tuple = model(**lowerCAmelCase_ )
__lowercase : int = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
__lowercase : Union[str, Any] = torch.tensor([-0.8_760, -7.0_042, -8.6_602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
__lowercase : Optional[int] = torch.tensor([-1.1_986, -7.0_903, -8.2_718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
__lowercase : Union[str, Any] = torch.tensor([-2.6_128, -8.0_080, -9.4_344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
__lowercase : Optional[Any] = torch.tensor([-1.5_080, -7.4_534, -8.8_917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
__lowercase : List[Any] = torch.tensor([-0.5_050, -6.5_833, -8.0_843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
__lowercase : str = torch.tensor([-0.3_826, -7.0_336, -8.2_413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
__lowercase : List[str] = torch.tensor([-1.2_113, -6.9_101, -8.3_470] )
elif model_name == "ast-finetuned-speech-commands-v2":
__lowercase : List[Any] = torch.tensor([6.1_589, -8.0_566, -8.7_984] )
else:
raise ValueError("""Unknown model name""" )
if not torch.allclose(logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ):
raise ValueError("""Logits don't match""" )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase_ )
print(F"Saving feature extractor to {pytorch_dump_folder_path}" )
feature_extractor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
print("""Pushing model and feature extractor to the hub...""" )
model.push_to_hub(F"MIT/{model_name}" )
feature_extractor.push_to_hub(F"MIT/{model_name}" )
if __name__ == "__main__":
lowerCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase : Tuple = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 233 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a__ ( unittest.TestCase ):
'''simple docstring'''
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
torch.manual_seed(0 )
lowerCAmelCase__ = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = self.dummy_uncond_unet
lowerCAmelCase__ = PNDMScheduler()
lowerCAmelCase__ = PNDMPipeline(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
pndm.to(lowerCamelCase_ )
pndm.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pndm(generator=lowerCamelCase_ , num_inference_steps=20 , output_type='''numpy''' ).images
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pndm(generator=lowerCamelCase_ , num_inference_steps=20 , output_type='''numpy''' , return_dict=lowerCamelCase_ )[0]
lowerCAmelCase__ = image[0, -3:, -3:, -1]
lowerCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase__ = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = '''google/ddpm-cifar10-32'''
lowerCAmelCase__ = UNetaDModel.from_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = PNDMScheduler()
lowerCAmelCase__ = PNDMPipeline(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
pndm.to(lowerCamelCase_ )
pndm.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pndm(generator=lowerCamelCase_ , output_type='''numpy''' ).images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase__ = np.array([0.1_564, 0.14_645, 0.1_406, 0.14_715, 0.12_425, 0.14_045, 0.13_115, 0.12_175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 366 |
'''simple docstring'''
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__UpperCAmelCase = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
__UpperCAmelCase = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
__UpperCAmelCase = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def _snake_case ( A , A ) -> List[Any]:
return float((preds == labels).mean() )
def _snake_case ( A , A , A="binary" ) -> int:
lowerCAmelCase__ = simple_accuracy(A , A )
lowerCAmelCase__ = float(fa_score(y_true=A , y_pred=A , average=A ) )
return {
"accuracy": acc,
"f1": fa,
}
def _snake_case ( A , A ) -> List[Any]:
lowerCAmelCase__ = {}
for id_pred, label in zip(A , A ):
lowerCAmelCase__ = F"""{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}"""
lowerCAmelCase__ = id_pred['''prediction''']
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
lowerCAmelCase__ = [(pred, label)]
lowerCAmelCase__ , lowerCAmelCase__ = [], []
for question, preds_labels in question_map.items():
lowerCAmelCase__ , lowerCAmelCase__ = zip(*A )
lowerCAmelCase__ = fa_score(y_true=A , y_pred=A , average='''macro''' )
fas.append(A )
lowerCAmelCase__ = int(sum(pred == label for pred, label in preds_labels ) == len(A ) )
ems.append(A )
lowerCAmelCase__ = float(sum(A ) / len(A ) )
lowerCAmelCase__ = sum(A ) / len(A )
lowerCAmelCase__ = float(fa_score(y_true=A , y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(lowerCamelCase_ , lowerCamelCase_ )}
elif self.config_name == "cb":
return acc_and_fa(lowerCamelCase_ , lowerCamelCase_ , fa_avg='''macro''' )
elif self.config_name == "record":
lowerCAmelCase__ = [
{
'''qas''': [
{'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]}
for ref in references
]
}
]
lowerCAmelCase__ = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions}
return evaluate_record(lowerCamelCase_ , lowerCamelCase_ )[0]
elif self.config_name == "multirc":
return evaluate_multirc(lowerCamelCase_ , lowerCamelCase_ )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' ) | 228 | 0 |
'''simple docstring'''
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a__ : Dict =logging.get_logger(__name__)
a__ : Optional[Any] ='''▁'''
a__ : List[str] ={'''vocab_file''': '''prophetnet.tokenizer'''}
a__ : List[str] ={
'''vocab_file''': {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'''
),
}
}
a__ : List[Any] ={
'''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False},
}
a__ : Any ={
'''microsoft/xprophetnet-large-wiki100-cased''': 512,
}
def lowercase__ ( __lowercase : List[str] ) -> Optional[Any]:
"""simple docstring"""
__UpperCamelCase = collections.OrderedDict()
with open(__lowercase , 'r' , encoding='utf-8' ) as reader:
__UpperCamelCase = reader.readlines()
for index, token in enumerate(__lowercase ):
__UpperCamelCase = token.rstrip('\n' )
__UpperCamelCase = index
return vocab
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] =VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : str =["input_ids", "attention_mask"]
def __init__( self : Dict , __A : Union[str, Any] , __A : Any="[SEP]" , __A : Optional[Any]="[SEP]" , __A : int="[SEP]" , __A : Any="[UNK]" , __A : Union[str, Any]="[PAD]" , __A : Any="[CLS]" , __A : Tuple="[MASK]" , __A : Optional[Dict[str, Any]] = None , **__A : Tuple , ):
__UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__A , eos_token=__A , sep_token=__A , unk_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
__UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__A ) )
__UpperCamelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
__UpperCamelCase = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[UNK]': 3, '[MASK]': 4}
for i in range(1_0 ):
__UpperCamelCase = f'''[unused{i}]'''
__UpperCamelCase = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
__UpperCamelCase = 1_2
__UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(__A )
def __getstate__( self : Union[str, Any] ):
__UpperCamelCase = self.__dict__.copy()
__UpperCamelCase = None
return state
def __setstate__( self : Any , __A : Dict ):
__UpperCamelCase = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase = {}
__UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCamelCase ( self : List[Any] , __A : List[int] , __A : Optional[List[int]] = None , __A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is None:
return ([0] * len(__A )) + [1]
return ([0] * len(__A )) + [1] + ([0] * len(__A )) + [1]
def _lowerCamelCase ( self : int , __A : List[int] , __A : Optional[List[int]] = None ):
__UpperCamelCase = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _lowerCamelCase ( self : Optional[Any] ):
return len(self.sp_model ) + self.fairseq_offset
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowerCamelCase ( self : int , __A : str ):
return self.sp_model.encode(__A , out_type=__A )
def _lowerCamelCase ( self : Tuple , __A : Tuple ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__UpperCamelCase = self.sp_model.PieceToId(__A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _lowerCamelCase ( self : Optional[Any] , __A : List[Any] ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _lowerCamelCase ( self : Union[str, Any] , __A : Optional[int] ):
__UpperCamelCase = ''.join(__A ).replace(__A , ' ' ).strip()
return out_string
def _lowerCamelCase ( self : Tuple , __A : str , __A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCamelCase = os.path.join(
__A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __A )
elif not os.path.isfile(self.vocab_file ):
with open(__A , 'wb' ) as fi:
__UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def _lowerCamelCase ( self : Optional[Any] , __A : List[int] , __A : Optional[List[int]] = None ):
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
__UpperCamelCase = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
| 53 |
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def _lowerCamelCase ( self : Any ):
__UpperCamelCase = tempfile.mkdtemp()
__UpperCamelCase = 8
# DPR tok
__UpperCamelCase = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__UpperCamelCase = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
os.makedirs(__A , exist_ok=__A )
__UpperCamelCase = os.path.join(__A , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
__UpperCamelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
__UpperCamelCase = dict(zip(__A , range(len(__A ) ) ) )
__UpperCamelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__UpperCamelCase = {'unk_token': '<unk>'}
__UpperCamelCase = os.path.join(self.tmpdirname , 'bart_tokenizer' )
os.makedirs(__A , exist_ok=__A )
__UpperCamelCase = os.path.join(__A , BART_VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase = os.path.join(__A , BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__A ) )
def _lowerCamelCase ( self : Tuple ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def _lowerCamelCase ( self : Optional[int] ):
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def _lowerCamelCase ( self : Union[str, Any] ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
def _lowerCamelCase ( self : str ):
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def _lowerCamelCase ( self : Tuple ):
__UpperCamelCase = self.get_dummy_dataset()
__UpperCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
__UpperCamelCase = dataset
__UpperCamelCase = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def _lowerCamelCase ( self : Any , __A : bool ):
__UpperCamelCase = self.get_dummy_dataset()
__UpperCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='custom' , )
if from_disk:
__UpperCamelCase = os.path.join(self.tmpdirname , 'dataset' )
__UpperCamelCase = os.path.join(self.tmpdirname , 'index.faiss' )
dataset.get_index('embeddings' ).save(os.path.join(self.tmpdirname , 'index.faiss' ) )
dataset.drop_index('embeddings' )
dataset.save_to_disk(os.path.join(self.tmpdirname , 'dataset' ) )
del dataset
__UpperCamelCase = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
__UpperCamelCase = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __A ) , )
return retriever
def _lowerCamelCase ( self : int ):
__UpperCamelCase = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
__UpperCamelCase = os.path.join(self.tmpdirname , 'hf_bert_base.hnswSQ8_correct_phi_128.c_index' )
dataset.save_faiss_index('embeddings' , index_file_name + '.index.dpr' )
pickle.dump(dataset['id'] , open(index_file_name + '.index_meta.dpr' , 'wb' ) )
__UpperCamelCase = os.path.join(self.tmpdirname , 'psgs_w100.tsv.pkl' )
__UpperCamelCase = {sample['id']: [sample['text'], sample['title']] for sample in dataset}
pickle.dump(__A , open(__A , 'wb' ) )
__UpperCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='legacy' , index_path=self.tmpdirname , )
__UpperCamelCase = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def _lowerCamelCase ( self : List[str] ):
__UpperCamelCase = 1
__UpperCamelCase = self.get_dummy_canonical_hf_index_retriever()
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , __A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowerCamelCase ( self : Optional[Any] ):
__UpperCamelCase = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
__UpperCamelCase = self.get_dummy_dataset()
retriever.save_pretrained(__A )
__UpperCamelCase = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
def _lowerCamelCase ( self : Optional[int] ):
__UpperCamelCase = 1
__UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , __A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowerCamelCase ( self : str ):
__UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
__UpperCamelCase = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
def _lowerCamelCase ( self : Optional[Any] ):
__UpperCamelCase = 1
__UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , __A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowerCamelCase ( self : Any ):
__UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
__UpperCamelCase = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = 1
__UpperCamelCase = self.get_dummy_legacy_index_retriever()
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['text', 'title'] )
self.assertEqual(len(doc_dicts[0]['text'] ) , __A )
self.assertEqual(doc_dicts[0]['text'][0] , 'bar' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['text'][0] , 'foo' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowerCamelCase ( self : str ):
__UpperCamelCase = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
__UpperCamelCase = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def _lowerCamelCase ( self : Optional[Any] ):
import torch
__UpperCamelCase = 1
__UpperCamelCase = self.get_dummy_canonical_hf_index_retriever()
__UpperCamelCase = [[5, 7], [1_0, 1_1]]
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = (
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__A , __A )
self.assertIsInstance(__A , __A )
self.assertIsInstance(__A , np.ndarray )
__UpperCamelCase = retriever(
__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A , return_tensors='pt' , )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = ( # noqa: F841
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
out['doc_ids'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__A , torch.Tensor )
self.assertIsInstance(__A , torch.Tensor )
self.assertIsInstance(__A , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def _lowerCamelCase ( self : Any ):
__UpperCamelCase = self.get_dpr_ctx_encoder_tokenizer()
__UpperCamelCase = 1
__UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
retriever.set_ctx_encoder_tokenizer(__A )
__UpperCamelCase = [[5, 7], [1_0, 1_1]]
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A )
self.assertEqual(
len(__A ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask') ) , __A ) # check for doc token related keys in dictionary.
| 53 | 1 |
"""simple docstring"""
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_sentencepiece_available():
import sentencepiece as sp
__snake_case = 5
__snake_case = 10
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( _a , unittest.TestCase ):
'''simple docstring'''
A_ : str = SpeechaTextTokenizer
A_ : Any = False
A_ : Optional[int] = True
def _UpperCAmelCase ( self ) -> Tuple:
super().setUp()
_a = sp.SentencePieceProcessor()
spm_model.Load(__lowerCAmelCase )
_a = ['''<s>''', '''<pad>''', '''</s>''', '''<unk>''']
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(__lowerCAmelCase ) )]
_a = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
_a = Path(self.tmpdirname )
save_json(__lowerCAmelCase , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__lowerCAmelCase , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
_a = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCAmelCase ( self ) -> str:
_a = '''<pad>'''
_a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) , __lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) , __lowerCAmelCase )
def _UpperCAmelCase ( self ) -> int:
_a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(__lowerCAmelCase ) , 1001 )
def _UpperCAmelCase ( self ) -> Tuple:
self.assertEqual(self.get_tokenizer().vocab_size , 1001 )
def _UpperCAmelCase ( self ) -> int:
_a = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
_a = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__lowerCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [289, 50, 14, 174, 386] , )
_a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__lowerCAmelCase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
_a = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
_a = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def _UpperCAmelCase ( self ) -> List[str]:
# fmt: off
_a = {'''input_ids''': [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase , model_name='''facebook/s2t-small-mustc-en-de-st''' , revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''' , )
@require_sentencepiece
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
A_ : Any = """valhalla/s2t_mustc_multilinguial_medium"""
A_ : List[Any] = """C'est trop cool"""
A_ : Optional[Any] = """Esto es genial"""
@classmethod
def _UpperCAmelCase ( cls ) -> int:
_a = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def _UpperCAmelCase ( self ) -> Union[str, Any]:
self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id['''it'''] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id['''de'''] , 11 )
def _UpperCAmelCase ( self ) -> List[Any]:
self.assertEqual(self.tokenizer.vocab_size , 10000 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
self.assertIn(__lowerCAmelCase , self.tokenizer.all_special_ids )
_a = [ES_CODE, 4, 1601, 47, 7647, 2]
_a = self.tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
_a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , __lowerCAmelCase )
def _UpperCAmelCase ( self ) -> str:
_a = '''fr'''
_a = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , __lowerCAmelCase )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def _UpperCAmelCase ( self ) -> Tuple:
_a = '''fr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
_a = '''es'''
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 371 |
"""simple docstring"""
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_a = ''''''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_a = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
_a = remove_duplicates(key.upper() )
_a = len(_lowerCAmelCase )
# First fill cipher with key characters
_a = {alphabet[i]: char for i, char in enumerate(_lowerCAmelCase )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(_lowerCAmelCase ), 26 ):
_a = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
_a = alphabet[i - offset]
_a = char
return cipher_alphabet
def A_ ( _lowerCAmelCase : str, _lowerCAmelCase : dict[str, str] ):
"""simple docstring"""
return "".join(cipher_map.get(_lowerCAmelCase, _lowerCAmelCase ) for ch in message.upper() )
def A_ ( _lowerCAmelCase : str, _lowerCAmelCase : dict[str, str] ):
"""simple docstring"""
_a = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(_lowerCAmelCase, _lowerCAmelCase ) for ch in message.upper() )
def A_ ( ):
"""simple docstring"""
_a = input('''Enter message to encode or decode: ''' ).strip()
_a = input('''Enter keyword: ''' ).strip()
_a = input('''Encipher or decipher? E/D:''' ).strip()[0].lower()
try:
_a = {'''e''': encipher, '''d''': decipher}[option]
except KeyError:
raise KeyError('''invalid input option''' )
_a = create_cipher_map(_lowerCAmelCase )
print(func(_lowerCAmelCase, _lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 153 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowercase : Optional[int] = logging.get_logger(__name__)
lowercase : Optional[int] = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class A ( __snake_case ):
__magic_name__ = '''gptj'''
__magic_name__ = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , SCREAMING_SNAKE_CASE=50400 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=4096 , SCREAMING_SNAKE_CASE=28 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=50256 , SCREAMING_SNAKE_CASE=50256 , SCREAMING_SNAKE_CASE=False , **SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
"""simple docstring"""
A : int = vocab_size
A : Tuple = n_positions
A : Optional[int] = n_embd
A : List[Any] = n_layer
A : Optional[Any] = n_head
A : Tuple = n_inner
A : Tuple = rotary_dim
A : Tuple = activation_function
A : Optional[int] = resid_pdrop
A : Dict = embd_pdrop
A : Tuple = attn_pdrop
A : Tuple = layer_norm_epsilon
A : Any = initializer_range
A : Dict = use_cache
A : Tuple = bos_token_id
A : Any = eos_token_id
super().__init__(
bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , tie_word_embeddings=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
class A ( __snake_case ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = "default" , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(SCREAMING_SNAKE_CASE , task=SCREAMING_SNAKE_CASE , patching_specs=SCREAMING_SNAKE_CASE , use_past=SCREAMING_SNAKE_CASE )
if not getattr(self._config , '''pad_token_id''' , SCREAMING_SNAKE_CASE ):
# TODO: how to do that better?
A : str = 0
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
A : str = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE , direction='''inputs''' )
A : Union[str, Any] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
A : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
return self._config.n_layer
@property
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
return self._config.n_head
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]:
"""simple docstring"""
A : Optional[int] = super(SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs(
SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE )
# We need to order the input in the way they appears in the forward()
A : Any = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
A, A : Tuple = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
A : Dict = seqlen + 2
A : Optional[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
A : Optional[Any] = [
(torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
A : Any = common_inputs['''attention_mask''']
if self.use_past:
A : List[str] = ordered_inputs['''attention_mask'''].dtype
A : int = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )] , dim=1 )
return ordered_inputs
@property
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
return 13
| 3 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "Speech2TextFeatureExtractor"
lowercase__ = "Speech2TextTokenizer"
def __init__( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple):
"""simple docstring"""
super().__init__(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = self.feature_extractor
lowercase_ = False
def __call__( self : Dict , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : List[str]):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase_ , **lowerCAmelCase_)
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""")
lowercase_ = kwargs.pop("""raw_speech""")
else:
lowercase_ = kwargs.pop("""audio""" , lowerCAmelCase_)
lowercase_ = kwargs.pop("""sampling_rate""" , lowerCAmelCase_)
lowercase_ = kwargs.pop("""text""" , lowerCAmelCase_)
if len(lowerCAmelCase_) > 0:
lowercase_ = args[0]
lowercase_ = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""")
if audio is not None:
lowercase_ = self.feature_extractor(lowerCAmelCase_ , *lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , **lowerCAmelCase_)
if text is not None:
lowercase_ = self.tokenizer(lowerCAmelCase_ , **lowerCAmelCase_)
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowercase_ = encodings["""input_ids"""]
return inputs
def _UpperCAmelCase ( self : List[str] , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[int] , *lowerCAmelCase_ : int , **lowerCAmelCase_ : str):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_)
@contextmanager
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""")
lowercase_ = True
lowercase_ = self.tokenizer
yield
lowercase_ = self.feature_extractor
lowercase_ = False
| 136 | 0 |
from collections.abc import Iterable
from typing import Generic, TypeVar
snake_case = TypeVar("""_T""")
class SCREAMING_SNAKE_CASE ( Generic[_T] ):
'''simple docstring'''
def __init__( self : Tuple , UpperCAmelCase_ : Iterable[_T] | None = None ):
SCREAMING_SNAKE_CASE : list[_T] = list(iterable or [] )
SCREAMING_SNAKE_CASE : list[_T] = []
def __len__( self : Dict ):
return len(self._stacka ) + len(self._stacka )
def __repr__( self : str ):
return f'''Queue({tuple(self._stacka[::-1] + self._stacka )})'''
def _A ( self : List[Any] , UpperCAmelCase_ : _T ):
self._stacka.append(UpperCAmelCase_ )
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : str = self._stacka.pop
SCREAMING_SNAKE_CASE : Tuple = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError("Queue is empty" )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 319 |
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCAmelCase_ : list ):
SCREAMING_SNAKE_CASE : Union[str, Any] = set_counts
SCREAMING_SNAKE_CASE : Any = max(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = len(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = [1] * num_sets
SCREAMING_SNAKE_CASE : List[str] = list(range(UpperCAmelCase_ ) )
def _A ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : List[Any] = self.get_parent(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = self.get_parent(UpperCAmelCase_ )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
SCREAMING_SNAKE_CASE : List[str] = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Tuple = src_parent
SCREAMING_SNAKE_CASE : Optional[int] = self.set_counts[src_parent]
SCREAMING_SNAKE_CASE : Optional[Any] = max(self.max_set , UpperCAmelCase_ )
return True
def _A ( self : Tuple , UpperCAmelCase_ : int ):
if self.parents[disj_set] == disj_set:
return disj_set
SCREAMING_SNAKE_CASE : Tuple = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
| 319 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : Tuple = 'dpr'
def __init__(self , A=30_522 , A=768 , A=12 , A=12 , A=3_072 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=2 , A=0.02 , A=1E-12 , A=0 , A="absolute" , A = 0 , **A , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(pad_token_id=A , **A )
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_act
_a = intermediate_size
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = initializer_range
_a = layer_norm_eps
_a = projection_dim
_a = position_embedding_type
| 211 |
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : str
__lowerCamelCase : int
def lowerCAmelCase (__A):
"""simple docstring"""
if not isinstance(__A , __A):
raise TypeError('''The parameter s type must be str.''')
return [s[i:] + s[:i] for i in range(len(__A))]
def lowerCAmelCase (__A):
"""simple docstring"""
if not isinstance(__A , __A):
raise TypeError('''The parameter s type must be str.''')
if not s:
raise ValueError('''The parameter s must not be empty.''')
_a = all_rotations(__A)
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
_a = {
"bwt_string": "".join([word[-1] for word in rotations]),
"idx_original_string": rotations.index(__A),
}
return response
def lowerCAmelCase (__A , __A):
"""simple docstring"""
if not isinstance(__A , __A):
raise TypeError('''The parameter bwt_string type must be str.''')
if not bwt_string:
raise ValueError('''The parameter bwt_string must not be empty.''')
try:
_a = int(__A)
except ValueError:
raise TypeError(
'''The parameter idx_original_string type must be int or passive'''
''' of cast to int.''')
if idx_original_string < 0:
raise ValueError('''The parameter idx_original_string must not be lower than 0.''')
if idx_original_string >= len(__A):
raise ValueError(
'''The parameter idx_original_string must be lower than''' ''' len(bwt_string).''')
_a = [''''''] * len(__A)
for _ in range(len(__A)):
for i in range(len(__A)):
_a = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
lowercase_ = "Provide a string that I will generate its BWT transform: "
lowercase_ = input(entry_msg).strip()
lowercase_ = bwt_transform(s)
print(
F"""Burrows Wheeler transform for string '{s}' results """
F"""in '{result['bwt_string']}'"""
)
lowercase_ = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
F"""Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' """
F"""we get original string '{original_string}'"""
)
| 211 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase__ : Optional[int] = logging.get_logger(__name__)
class UpperCamelCase__ ( lowercase_, lowercase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """maskformer-swin"""
_SCREAMING_SNAKE_CASE = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any]=2_2_4 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE_ : int=3 , SCREAMING_SNAKE_CASE_ : Dict=9_6 , SCREAMING_SNAKE_CASE_ : Optional[int]=[2, 2, 6, 2] , SCREAMING_SNAKE_CASE_ : List[Any]=[3, 6, 1_2, 2_4] , SCREAMING_SNAKE_CASE_ : List[str]=7 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=4.0 , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : Tuple=0.0 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE_ : int=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE_ : Dict=False , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.02 , SCREAMING_SNAKE_CASE_ : int=1E-5 , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : Optional[int]=None , **SCREAMING_SNAKE_CASE_ : str , ):
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = image_size
lowerCAmelCase_ : Optional[Any] = patch_size
lowerCAmelCase_ : Optional[int] = num_channels
lowerCAmelCase_ : List[str] = embed_dim
lowerCAmelCase_ : Dict = depths
lowerCAmelCase_ : Optional[Any] = len(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Tuple = num_heads
lowerCAmelCase_ : List[str] = window_size
lowerCAmelCase_ : Any = mlp_ratio
lowerCAmelCase_ : Any = qkv_bias
lowerCAmelCase_ : List[Any] = hidden_dropout_prob
lowerCAmelCase_ : List[Any] = attention_probs_dropout_prob
lowerCAmelCase_ : Tuple = drop_path_rate
lowerCAmelCase_ : List[str] = hidden_act
lowerCAmelCase_ : Any = use_absolute_embeddings
lowerCAmelCase_ : Optional[Any] = layer_norm_eps
lowerCAmelCase_ : str = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase_ : List[str] = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE_ ) - 1) )
lowerCAmelCase_ : List[Any] = ['stem'] + [F"stage{idx}" for idx in range(1 , len(SCREAMING_SNAKE_CASE_ ) + 1 )]
lowerCAmelCase_ ,lowerCAmelCase_ : Tuple = get_aligned_output_features_output_indices(
out_features=SCREAMING_SNAKE_CASE_ , out_indices=SCREAMING_SNAKE_CASE_ , stage_names=self.stage_names )
| 289 |
"""simple docstring"""
def UpperCamelCase_ ( lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[int] ) -> None:
"""simple docstring"""
lowerCAmelCase_ : List[Any] = len(lowerCAmelCase__ )
print('The following activities are selected:' )
# The first activity is always selected
lowerCAmelCase_ : str = 0
print(lowerCAmelCase__ , end=',' )
# Consider rest of the activities
for j in range(lowerCAmelCase__ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(lowerCAmelCase__ , end=',' )
lowerCAmelCase_ : Tuple = j
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ : List[str] = [1, 3, 0, 5, 8, 5]
lowercase__ : Dict = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 289 | 1 |
from __future__ import annotations
from math import pow, sqrt
def __UpperCAmelCase ( a_ , a_ , a_):
if (resistance, reactance, impedance).count(0) != 1:
raise ValueError('One and only one argument must be 0')
if resistance == 0:
return {"resistance": sqrt(pow(a_ , 2) - pow(a_ , 2))}
elif reactance == 0:
return {"reactance": sqrt(pow(a_ , 2) - pow(a_ , 2))}
elif impedance == 0:
return {"impedance": sqrt(pow(a_ , 2) + pow(a_ , 2))}
else:
raise ValueError('Exactly one argument must be 0')
if __name__ == "__main__":
import doctest
doctest.testmod()
| 178 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowercase = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 178 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
"""simple docstring"""
a : Optional[Any] ='''deformable_detr'''
a : Dict ={
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , snake_case__=True , snake_case__=None , snake_case__=3 , snake_case__=300 , snake_case__=1_024 , snake_case__=6 , snake_case__=1_024 , snake_case__=8 , snake_case__=6 , snake_case__=1_024 , snake_case__=8 , snake_case__=0.0 , snake_case__=True , snake_case__="relu" , snake_case__=256 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1.0 , snake_case__=True , snake_case__=False , snake_case__="sine" , snake_case__="resnet50" , snake_case__=True , snake_case__=False , snake_case__=4 , snake_case__=4 , snake_case__=4 , snake_case__=False , snake_case__=300 , snake_case__=False , snake_case__=1 , snake_case__=5 , snake_case__=2 , snake_case__=1 , snake_case__=1 , snake_case__=5 , snake_case__=2 , snake_case__=0.1 , snake_case__=0.25 , snake_case__=False , **snake_case__ , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
lowerCAmelCase : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
lowerCAmelCase : Optional[Any] = backbone_config.get("model_type" )
lowerCAmelCase : Optional[int] = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase : Tuple = config_class.from_dict(lowerCAmelCase_ )
lowerCAmelCase : Tuple = use_timm_backbone
lowerCAmelCase : Optional[int] = backbone_config
lowerCAmelCase : List[str] = num_channels
lowerCAmelCase : Any = num_queries
lowerCAmelCase : Optional[Any] = max_position_embeddings
lowerCAmelCase : Optional[int] = d_model
lowerCAmelCase : List[str] = encoder_ffn_dim
lowerCAmelCase : int = encoder_layers
lowerCAmelCase : Dict = encoder_attention_heads
lowerCAmelCase : List[str] = decoder_ffn_dim
lowerCAmelCase : Tuple = decoder_layers
lowerCAmelCase : Optional[Any] = decoder_attention_heads
lowerCAmelCase : Optional[int] = dropout
lowerCAmelCase : List[str] = attention_dropout
lowerCAmelCase : List[Any] = activation_dropout
lowerCAmelCase : List[str] = activation_function
lowerCAmelCase : Optional[int] = init_std
lowerCAmelCase : List[str] = init_xavier_std
lowerCAmelCase : Any = encoder_layerdrop
lowerCAmelCase : Tuple = auxiliary_loss
lowerCAmelCase : List[Any] = position_embedding_type
lowerCAmelCase : int = backbone
lowerCAmelCase : str = use_pretrained_backbone
lowerCAmelCase : Any = dilation
# deformable attributes
lowerCAmelCase : Dict = num_feature_levels
lowerCAmelCase : Dict = encoder_n_points
lowerCAmelCase : int = decoder_n_points
lowerCAmelCase : int = two_stage
lowerCAmelCase : Any = two_stage_num_proposals
lowerCAmelCase : Dict = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
lowerCAmelCase : Tuple = class_cost
lowerCAmelCase : int = bbox_cost
lowerCAmelCase : Dict = giou_cost
# Loss coefficients
lowerCAmelCase : Tuple = mask_loss_coefficient
lowerCAmelCase : Tuple = dice_loss_coefficient
lowerCAmelCase : Optional[int] = bbox_loss_coefficient
lowerCAmelCase : Optional[int] = giou_loss_coefficient
lowerCAmelCase : Any = eos_coefficient
lowerCAmelCase : Union[str, Any] = focal_alpha
lowerCAmelCase : int = disable_custom_kernels
super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.d_model
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowerCAmelCase : Any = self.backbone_config.to_dict()
lowerCAmelCase : List[str] = self.__class__.model_type
return output
| 365 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( lowercase , unittest.TestCase ):
"""simple docstring"""
a : List[str] =None
a : List[Any] =BloomTokenizerFast
a : Optional[int] =BloomTokenizerFast
a : Optional[Any] =True
a : Dict =False
a : Optional[Any] ="tokenizer_file"
a : Optional[int] ={"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def lowercase__ ( self ):
"""simple docstring"""
super().setUp()
lowerCAmelCase : Tuple = BloomTokenizerFast.from_pretrained("bigscience/tokenizer" )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self , **snake_case__ ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = self.get_rust_tokenizer()
lowerCAmelCase : List[Any] = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
lowerCAmelCase : str = [[2_175, 23_714, 73_173, 144_252, 2], [77, 132_619, 3_478, 368, 109_586, 35_433, 2]]
lowerCAmelCase : Optional[int] = tokenizer.batch_encode_plus(snake_case__ )["input_ids"]
self.assertListEqual(snake_case__ , snake_case__ )
lowerCAmelCase : Optional[int] = tokenizer.batch_decode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
def lowercase__ ( self , snake_case__=6 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowerCAmelCase : str = "This is a simple input"
lowerCAmelCase : Tuple = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase : Any = ("This is a simple input", "This is a pair")
lowerCAmelCase : Tuple = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
try:
tokenizer_r.encode(snake_case__ , max_length=snake_case__ )
tokenizer_r.encode_plus(snake_case__ , max_length=snake_case__ )
tokenizer_r.batch_encode_plus(snake_case__ , max_length=snake_case__ )
tokenizer_r.encode(snake_case__ , max_length=snake_case__ )
tokenizer_r.batch_encode_plus(snake_case__ , max_length=snake_case__ )
except ValueError:
self.fail("Bloom Tokenizer should be able to deal with padding" )
lowerCAmelCase : Tuple = None # Hotfixing padding = None
self.assertRaises(snake_case__ , tokenizer_r.encode , snake_case__ , max_length=snake_case__ , padding="max_length" )
# Simple input
self.assertRaises(snake_case__ , tokenizer_r.encode_plus , snake_case__ , max_length=snake_case__ , padding="max_length" )
# Simple input
self.assertRaises(
snake_case__ , tokenizer_r.batch_encode_plus , snake_case__ , max_length=snake_case__ , padding="max_length" , )
# Pair input
self.assertRaises(snake_case__ , tokenizer_r.encode , snake_case__ , max_length=snake_case__ , padding="max_length" )
# Pair input
self.assertRaises(snake_case__ , tokenizer_r.encode_plus , snake_case__ , max_length=snake_case__ , padding="max_length" )
# Pair input
self.assertRaises(
snake_case__ , tokenizer_r.batch_encode_plus , snake_case__ , max_length=snake_case__ , padding="max_length" , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = self.get_rust_tokenizer()
lowerCAmelCase : int = load_dataset("xnli" , "all_languages" , split="test" , streaming=snake_case__ )
lowerCAmelCase : Tuple = next(iter(snake_case__ ) )["premise"] # pick up one data
lowerCAmelCase : Optional[Any] = list(sample_data.values() )
lowerCAmelCase : int = list(map(tokenizer.encode , snake_case__ ) )
lowerCAmelCase : List[Any] = [tokenizer.decode(snake_case__ , clean_up_tokenization_spaces=snake_case__ ) for x in output_tokens]
self.assertListEqual(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 133 | 0 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'
lowerCamelCase_ = Image.open(requests.get(lowercase , stream=lowercase ).raw ).convert('RGB' )
return image
def _SCREAMING_SNAKE_CASE ( lowercase : Tuple ):
'''simple docstring'''
lowerCamelCase_ = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') )
# fmt: on
return rename_keys
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : Dict , lowercase : List[str] ):
'''simple docstring'''
lowerCamelCase_ = dct.pop(lowercase )
lowerCamelCase_ = val
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Tuple ):
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
lowerCamelCase_ = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""" )
lowerCamelCase_ = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
lowerCamelCase_ = torch.cat((q_bias, torch.zeros_like(lowercase , requires_grad=lowercase ), v_bias) )
lowerCamelCase_ = qkv_bias
def _SCREAMING_SNAKE_CASE ( lowercase : Any ):
'''simple docstring'''
lowerCamelCase_ = 3_64 if 'coco' in model_name else 2_24
lowerCamelCase_ = InstructBlipVisionConfig(image_size=lowercase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
lowerCamelCase_ = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
lowerCamelCase_ = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
lowerCamelCase_ = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf' , vocab_size=3_20_01 ).to_dict()
elif "vicuna-13b" in model_name:
lowerCamelCase_ = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf' , vocab_size=3_20_01 ).to_dict()
else:
raise ValueError('Model name not supported' )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
lowerCamelCase_ = InstructBlipQFormerConfig(vocab_size=3_05_23 ).to_dict()
lowerCamelCase_ = InstructBlipConfig(vision_config=lowercase , text_config=lowercase , qformer_config=lowercase )
return config, image_size
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( lowercase : List[str] , lowercase : Optional[int]=None , lowercase : Optional[Any]=False ):
'''simple docstring'''
lowerCamelCase_ = AutoTokenizer.from_pretrained('bert-base-uncased' , truncation_side='left' )
qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'} )
if "t5" in model_name:
lowerCamelCase_ = TaTokenizerFast.from_pretrained('google/flan-t5-xl' , truncation_side='left' )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
lowerCamelCase_ = LlamaTokenizerFast.from_pretrained(
'huggyllama/llama-7b' , truncation_side='left' , bos_token='</s>' , unk_token='</s>' )
tokenizer.add_special_tokens({'pad_token': '[PAD]'} )
lowerCamelCase_ , lowerCamelCase_ = get_blipa_config(lowercase )
lowerCamelCase_ = InstructBlipForConditionalGeneration(lowercase ).eval()
lowerCamelCase_ = {
'instructblip-vicuna-7b': ('blip2_vicuna_instruct', 'vicuna7b'),
'instructblip-vicuna-13b': ('blip2_vicuna_instruct', 'vicuna13b'),
'instructblip-flan-t5-xl': ('blip2_t5_instruct', 'flant5xl'),
'instructblip-flan-t5-xxl': ('blip2_t5_instruct', 'flant5xxl'),
}
lowerCamelCase_ , lowerCamelCase_ = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
lowerCamelCase_ = 'cuda:1' if torch.cuda.is_available() else 'cpu'
lowerCamelCase_ = 'cuda:2' if torch.cuda.is_available() else 'cpu'
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = load_model_and_preprocess(
name=lowercase , model_type=lowercase , is_eval=lowercase , device=lowercase )
original_model.eval()
print('Done!' )
# update state dict keys
lowerCamelCase_ = original_model.state_dict()
lowerCamelCase_ = create_rename_keys(lowercase )
for src, dest in rename_keys:
rename_key(lowercase , lowercase , lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowerCamelCase_ = state_dict.pop(lowercase )
if key.startswith('Qformer.bert' ):
lowerCamelCase_ = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
lowerCamelCase_ = key.replace('self' , 'attention' )
if "llm_proj" in key:
lowerCamelCase_ = key.replace('llm_proj' , 'language_projection' )
if "t5_proj" in key:
lowerCamelCase_ = key.replace('t5_proj' , 'language_projection' )
if key.startswith('llm_model' ):
lowerCamelCase_ = key.replace('llm_model' , 'language_model' )
if key.startswith('t5' ):
lowerCamelCase_ = key.replace('t5' , 'language' )
lowerCamelCase_ = val
# read in qv biases
read_in_q_v_bias(lowercase , lowercase )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(lowercase , strict=lowercase )
lowerCamelCase_ = load_demo_image()
lowerCamelCase_ = 'What is unusual about this image?'
# create processor
lowerCamelCase_ = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=lowercase , image_std=lowercase )
lowerCamelCase_ = InstructBlipProcessor(
image_processor=lowercase , tokenizer=lowercase , qformer_tokenizer=lowercase , )
lowerCamelCase_ = processor(images=lowercase , text=lowercase , return_tensors='pt' ).to(lowercase )
# make sure processor creates exact same pixel values
lowerCamelCase_ = vis_processors['eval'](lowercase ).unsqueeze(0 ).to(lowercase )
lowerCamelCase_ = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , lowercase )
original_model.to(lowercase )
hf_model.to(lowercase )
with torch.no_grad():
if "vicuna" in model_name:
lowerCamelCase_ = original_model({'image': original_pixel_values, 'text_input': [prompt]} ).logits
lowerCamelCase_ = hf_model(**lowercase ).logits
else:
lowerCamelCase_ = original_model(
{'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']} ).logits
lowerCamelCase_ = tokenizer('\n' , return_tensors='pt' ).input_ids.to(lowercase )
lowerCamelCase_ = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -1_00 )
lowerCamelCase_ = hf_model(**lowercase , labels=lowercase ).logits
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
lowerCamelCase_ = 1e-4 if 'vicuna' in model_name else 1e-5
assert torch.allclose(original_logits.to(logits.device ) , lowercase , atol=lowercase )
print('Looks ok!' )
print('Generating with original model...' )
lowerCamelCase_ = original_model.generate({'image': original_pixel_values, 'prompt': prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print('Generating with HF model...' )
lowerCamelCase_ = hf_model.generate(
**lowercase , do_sample=lowercase , num_beams=5 , max_length=2_56 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
lowerCamelCase_ = 2
print('Original generation:' , lowercase )
lowerCamelCase_ = processor.batch_decode(lowercase , skip_special_tokens=lowercase )
lowerCamelCase_ = [text.strip() for text in output_text]
print('HF generation:' , lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowercase )
hf_model.save_pretrained(lowercase )
if push_to_hub:
processor.push_to_hub(f"""Salesforce/{model_name}""" )
hf_model.push_to_hub(f"""Salesforce/{model_name}""" )
if __name__ == "__main__":
lowerCamelCase : Any = argparse.ArgumentParser()
lowerCamelCase : Tuple = [
"instructblip-vicuna-7b",
"instructblip-vicuna-13b",
"instructblip-flan-t5-xl",
"instructblip-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="instructblip-flan-t5-xl",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
lowerCamelCase : Tuple = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 204 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class A:
'''simple docstring'''
def __init__( self : str , A_ : Optional[Any] , ) -> str:
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = 13
lowerCamelCase_ = 7
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = 2
lowerCamelCase_ = 99
lowerCamelCase_ = 0
lowerCamelCase_ = 32
lowerCamelCase_ = 2
lowerCamelCase_ = 4
lowerCamelCase_ = 0.1
lowerCamelCase_ = 0.1
lowerCamelCase_ = 512
lowerCamelCase_ = 16
lowerCamelCase_ = 2
lowerCamelCase_ = 0.02
lowerCamelCase_ = 3
lowerCamelCase_ = 4
lowerCamelCase_ = 'last'
lowerCamelCase_ = True
lowerCamelCase_ = None
lowerCamelCase_ = 0
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
lowerCamelCase_ = None
if self.use_input_lengths:
lowerCamelCase_ = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def a__ ( self : int , A_ : List[str] , A_ : List[Any] , A_ : str , A_ : List[Any] , A_ : int , A_ : Tuple , A_ : Optional[int] , A_ : Optional[int] , A_ : str , ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFFlaubertModel(config=A_ )
lowerCamelCase_ = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
lowerCamelCase_ = model(A_ )
lowerCamelCase_ = [input_ids, input_mask]
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self : Tuple , A_ : List[str] , A_ : int , A_ : List[Any] , A_ : Any , A_ : Any , A_ : Dict , A_ : str , A_ : List[Any] , A_ : Union[str, Any] , ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = TFFlaubertWithLMHeadModel(A_ )
lowerCamelCase_ = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : str , A_ : Tuple , A_ : Any , A_ : Any , A_ : List[Any] , A_ : Dict , A_ : List[Any] , A_ : Union[str, Any] , A_ : Optional[int] , A_ : List[Any] , ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFFlaubertForQuestionAnsweringSimple(A_ )
lowerCamelCase_ = {'input_ids': input_ids, 'lengths': input_lengths}
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : Optional[int] , A_ : List[Any] , A_ : str , A_ : List[str] , A_ : Dict , A_ : Optional[Any] , A_ : Tuple , A_ : str , A_ : Optional[int] , A_ : Tuple , ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = TFFlaubertForSequenceClassification(A_ )
lowerCamelCase_ = {'input_ids': input_ids, 'lengths': input_lengths}
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self : Dict , A_ : Optional[Any] , A_ : List[Any] , A_ : int , A_ : Any , A_ : Union[str, Any] , A_ : str , A_ : Any , A_ : Union[str, Any] , A_ : List[str] , ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = TFFlaubertForTokenClassification(config=A_ )
lowerCamelCase_ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : List[Any] , A_ : Optional[int] , A_ : List[Any] , A_ : Optional[int] , A_ : Tuple , A_ : Union[str, Any] , A_ : int , A_ : str , A_ : Tuple , A_ : str , ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = self.num_choices
lowerCamelCase_ = TFFlaubertForMultipleChoice(config=A_ )
lowerCamelCase_ = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase_ = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase_ = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase_ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) = config_and_inputs
lowerCamelCase_ = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'langs': token_type_ids,
'lengths': input_lengths,
}
return config, inputs_dict
@require_tf
class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCamelCase = (
{
'''feature-extraction''': TFFlaubertModel,
'''fill-mask''': TFFlaubertWithLMHeadModel,
'''question-answering''': TFFlaubertForQuestionAnsweringSimple,
'''text-classification''': TFFlaubertForSequenceClassification,
'''token-classification''': TFFlaubertForTokenClassification,
'''zero-shot''': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
def a__ ( self : Union[str, Any] , A_ : Any , A_ : List[Any] , A_ : Union[str, Any] , A_ : str , A_ : List[str] ) -> Optional[Any]:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = TFFlaubertModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=A_ , emb_dim=37 )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : int ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*A_ )
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*A_ )
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*A_ )
def a__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*A_ )
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*A_ )
def a__ ( self : int ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*A_ )
@slow
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFFlaubertModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_tf
@require_sentencepiece
@require_tokenizers
class A( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ ( self : Dict ) -> str:
"""simple docstring"""
lowerCamelCase_ = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
lowerCamelCase_ = tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
lowerCamelCase_ = model(A_ )[0]
lowerCamelCase_ = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , A_ )
# compare the actual values for a slice.
lowerCamelCase_ = tf.convert_to_tensor(
[
[
[-1.8768773, -1.566555, 0.27072418],
[-1.6920038, -0.5873505, 1.9329599],
[-2.9563985, -1.6993835, 1.7972052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 204 | 1 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> list:
lowercase__: Optional[int] = [0] * len(__UpperCAmelCase )
for i in range(1 , len(__UpperCAmelCase ) ):
# use last results for better performance - dynamic programming
lowercase__: Any = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
lowercase__: List[Any] = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
lowercase__: Union[str, Any] = j
return prefix_result
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> int:
return max(prefix_function(__UpperCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[Any] = "ctrl"
_UpperCAmelCase :int = ["past_key_values"]
_UpperCAmelCase :Dict = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , _UpperCAmelCase=246534 , _UpperCAmelCase=256 , _UpperCAmelCase=1280 , _UpperCAmelCase=8192 , _UpperCAmelCase=48 , _UpperCAmelCase=16 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , **_UpperCAmelCase , ):
lowercase__: Union[str, Any] = vocab_size
lowercase__: Optional[int] = n_positions
lowercase__: Optional[int] = n_embd
lowercase__: Any = n_layer
lowercase__: Any = n_head
lowercase__: int = dff
lowercase__: Dict = resid_pdrop
lowercase__: Any = embd_pdrop
lowercase__: Any = layer_norm_epsilon
lowercase__: Optional[int] = initializer_range
lowercase__: Dict = use_cache
super().__init__(**_UpperCAmelCase )
| 2 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a ( metaclass=_lowerCamelCase ):
snake_case_ = ["torch", "transformers", "onnx"]
def __init__( self : int , *lowercase_ : List[Any] , **lowercase_ : List[str] ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def A_ ( cls : Optional[Any] , *lowercase_ : Tuple , **lowercase_ : str ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def A_ ( cls : Optional[Any] , *lowercase_ : int , **lowercase_ : List[str] ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class a ( metaclass=_lowerCamelCase ):
snake_case_ = ["torch", "transformers", "onnx"]
def __init__( self : Any , *lowercase_ : Tuple , **lowercase_ : str ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def A_ ( cls : List[str] , *lowercase_ : Any , **lowercase_ : int ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def A_ ( cls : List[Any] , *lowercase_ : List[str] , **lowercase_ : str ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class a ( metaclass=_lowerCamelCase ):
snake_case_ = ["torch", "transformers", "onnx"]
def __init__( self : Union[str, Any] , *lowercase_ : Tuple , **lowercase_ : Tuple ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def A_ ( cls : List[str] , *lowercase_ : Optional[int] , **lowercase_ : Optional[int] ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def A_ ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : List[Any] ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class a ( metaclass=_lowerCamelCase ):
snake_case_ = ["torch", "transformers", "onnx"]
def __init__( self : Dict , *lowercase_ : Tuple , **lowercase_ : Tuple ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def A_ ( cls : Any , *lowercase_ : int , **lowercase_ : Any ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def A_ ( cls : Optional[Any] , *lowercase_ : Optional[Any] , **lowercase_ : Any ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class a ( metaclass=_lowerCamelCase ):
snake_case_ = ["torch", "transformers", "onnx"]
def __init__( self : Dict , *lowercase_ : str , **lowercase_ : Union[str, Any] ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def A_ ( cls : Dict , *lowercase_ : List[Any] , **lowercase_ : str ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def A_ ( cls : Dict , *lowercase_ : Dict , **lowercase_ : List[str] ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class a ( metaclass=_lowerCamelCase ):
snake_case_ = ["torch", "transformers", "onnx"]
def __init__( self : Optional[Any] , *lowercase_ : Optional[int] , **lowercase_ : List[str] ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def A_ ( cls : int , *lowercase_ : Dict , **lowercase_ : Optional[int] ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def A_ ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : str ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
| 56 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
snake_case_ = CycleDiffusionPipeline
snake_case_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
snake_case_ = PipelineTesterMixin.required_optional_params - {"latents"}
snake_case_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
snake_case_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
snake_case_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def A_ ( self : Tuple ):
torch.manual_seed(0 )
snake_case_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
snake_case_ = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=1000 , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , )
torch.manual_seed(0 )
snake_case_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
snake_case_ = CLIPTextModel(lowercase_ )
snake_case_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
snake_case_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def A_ ( self : Any , lowercase_ : int , lowercase_ : Optional[Any]=0 ):
snake_case_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
snake_case_ = image / 2 + 0.5
if str(lowercase_ ).startswith('''mps''' ):
snake_case_ = torch.manual_seed(lowercase_ )
else:
snake_case_ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
snake_case_ = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def A_ ( self : Union[str, Any] ):
snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = CycleDiffusionPipeline(**lowercase_ )
snake_case_ = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ = self.get_dummy_inputs(lowercase_ )
snake_case_ = pipe(**lowercase_ )
snake_case_ = output.images
snake_case_ = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
snake_case_ = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def A_ ( self : Union[str, Any] ):
snake_case_ = self.get_dummy_components()
for name, module in components.items():
if hasattr(lowercase_ , '''half''' ):
snake_case_ = module.half()
snake_case_ = CycleDiffusionPipeline(**lowercase_ )
snake_case_ = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ = self.get_dummy_inputs(lowercase_ )
snake_case_ = pipe(**lowercase_ )
snake_case_ = output.images
snake_case_ = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
snake_case_ = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def A_ ( self : Optional[int] ):
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def A_ ( self : List[Any] ):
return super().test_inference_batch_single_identical()
@skip_mps
def A_ ( self : Union[str, Any] ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def A_ ( self : Union[str, Any] ):
return super().test_save_load_optional_components()
@skip_mps
def A_ ( self : Union[str, Any] ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def A_ ( self : List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : Union[str, Any] ):
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
snake_case_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
snake_case_ = init_image.resize((512, 512) )
snake_case_ = '''CompVis/stable-diffusion-v1-4'''
snake_case_ = DDIMScheduler.from_pretrained(lowercase_ , subfolder='''scheduler''' )
snake_case_ = CycleDiffusionPipeline.from_pretrained(
lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ , torch_dtype=torch.floataa , revision='''fp16''' )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
snake_case_ = '''A black colored car'''
snake_case_ = '''A blue colored car'''
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=lowercase_ , source_prompt=lowercase_ , image=lowercase_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase_ , output_type='''np''' , )
snake_case_ = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def A_ ( self : List[str] ):
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
snake_case_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
snake_case_ = init_image.resize((512, 512) )
snake_case_ = '''CompVis/stable-diffusion-v1-4'''
snake_case_ = DDIMScheduler.from_pretrained(lowercase_ , subfolder='''scheduler''' )
snake_case_ = CycleDiffusionPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
snake_case_ = '''A black colored car'''
snake_case_ = '''A blue colored car'''
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=lowercase_ , source_prompt=lowercase_ , image=lowercase_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase_ , output_type='''np''' , )
snake_case_ = output.images
assert np.abs(image - expected_image ).max() < 2e-2
| 56 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=lowerCamelCase__ ).to(lowerCamelCase__ )
__lowerCamelCase = AutoTokenizer.from_pretrained('google/mt5-small' )
__lowerCamelCase = tokenizer('Hello there' , return_tensors='pt' ).input_ids
__lowerCamelCase = tokenizer('Hi I am' , return_tensors='pt' ).input_ids
__lowerCamelCase = model(input_ids.to(lowerCamelCase__ ) , labels=labels.to(lowerCamelCase__ ) ).loss
__lowerCamelCase = -(labels.shape[-1] * loss.item())
__lowerCamelCase = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 369 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''philschmid/bart-large-cnn-samsum'''
snake_case_ = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
snake_case_ = '''summarizer'''
snake_case_ = AutoTokenizer
snake_case_ = AutoModelForSeqaSeqLM
snake_case_ = ['''text''']
snake_case_ = ['''text''']
def lowercase_ ( self , lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return self.pre_processor(lowerCamelCase__ , return_tensors='pt' , truncation=lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> Dict:
'''simple docstring'''
return self.model.generate(**lowerCamelCase__ )[0]
def lowercase_ ( self , lowerCamelCase__ ) -> Any:
'''simple docstring'''
return self.pre_processor.decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ , clean_up_tokenization_spaces=lowerCamelCase__ )
| 348 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> float:
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(_lowerCAmelCase , _lowerCAmelCase ) ) )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> list[list[list[float] | float]]:
if dataset.ndim != value_array.ndim:
snake_case__ : Tuple = (
"""Wrong input data's dimensions... """
f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
)
raise ValueError(_lowerCAmelCase )
try:
if dataset.shape[1] != value_array.shape[1]:
snake_case__ : Any = (
"""Wrong input data's shape... """
f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
)
raise ValueError(_lowerCAmelCase )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("""Wrong shape""" )
if dataset.dtype != value_array.dtype:
snake_case__ : Tuple = (
"""Input data have different datatype... """
f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
)
raise TypeError(_lowerCAmelCase )
snake_case__ : Tuple = []
for value in value_array:
snake_case__ : Any = euclidean(_lowerCAmelCase , dataset[0] )
snake_case__ : Any = dataset[0].tolist()
for dataset_value in dataset[1:]:
snake_case__ : Union[str, Any] = euclidean(_lowerCAmelCase , _lowerCAmelCase )
if dist > temp_dist:
snake_case__ : Union[str, Any] = temp_dist
snake_case__ : int = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> float:
return np.dot(_lowerCAmelCase , _lowerCAmelCase ) / (norm(_lowerCAmelCase ) * norm(_lowerCAmelCase ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | """simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
_UpperCamelCase : Dict = logging.get_logger(__name__)
@add_end_docstrings(_a)
class UpperCAmelCase_ ( _a):
def __init__( self , **a ) -> Dict:
super().__init__(**a )
if self.framework == "tf":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , 'vision' )
self.check_model_type(a )
def __call__( self , a , a = None , **a , ) -> List[str]:
if "text_queries" in kwargs:
lowercase__ : Optional[Any] = kwargs.pop('text_queries' )
if isinstance(a , (str, Image.Image) ):
lowercase__ : Optional[Any] = {'image': image, 'candidate_labels': candidate_labels}
else:
lowercase__ : List[str] = image
lowercase__ : Optional[Any] = super().__call__(a , **a )
return results
def _UpperCAmelCase ( self , **a ) -> Dict:
lowercase__ : Optional[Any] = {}
if "threshold" in kwargs:
lowercase__ : Tuple = kwargs['threshold']
if "top_k" in kwargs:
lowercase__ : List[Any] = kwargs['top_k']
return {}, {}, postprocess_params
def _UpperCAmelCase ( self , a ) -> Dict:
lowercase__ : Any = load_image(inputs['image'] )
lowercase__ : Optional[int] = inputs['candidate_labels']
if isinstance(a , a ):
lowercase__ : Optional[int] = candidate_labels.split(',' )
lowercase__ : Optional[int] = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(a ):
lowercase__ : List[str] = self.tokenizer(a , return_tensors=self.framework )
lowercase__ : List[Any] = self.image_processor(a , return_tensors=self.framework )
yield {
"is_last": i == len(a ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def _UpperCAmelCase ( self , a ) -> List[Any]:
lowercase__ : List[Any] = model_inputs.pop('target_size' )
lowercase__ : Dict = model_inputs.pop('candidate_label' )
lowercase__ : Dict = model_inputs.pop('is_last' )
lowercase__ : Optional[int] = self.model(**a )
lowercase__ : Any = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
return model_outputs
def _UpperCAmelCase ( self , a , a=0.1 , a=None ) -> Union[str, Any]:
lowercase__ : Dict = []
for model_output in model_outputs:
lowercase__ : List[Any] = model_output['candidate_label']
lowercase__ : Optional[int] = BaseModelOutput(a )
lowercase__ : Any = self.image_processor.post_process_object_detection(
outputs=a , threshold=a , target_sizes=model_output['target_size'] )[0]
for index in outputs["scores"].nonzero():
lowercase__ : Union[str, Any] = outputs['scores'][index].item()
lowercase__ : Tuple = self._get_bounding_box(outputs['boxes'][index][0] )
lowercase__ : Tuple = {'score': score, 'label': label, 'box': box}
results.append(a )
lowercase__ : Dict = sorted(a , key=lambda a : x["score"] , reverse=a )
if top_k:
lowercase__ : Dict = results[:top_k]
return results
def _UpperCAmelCase ( self , a ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = box.int().tolist()
lowercase__ : Any = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
| 77 | 0 |
import math
def A__ ( lowerCamelCase = 1_00 ) -> Tuple:
UpperCamelCase_: Tuple = sum(i * i for i in range(1 , n + 1 ) )
UpperCamelCase_: Dict = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 370 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase_ : Dict = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = """luke"""
def __init__( self : Tuple , snake_case_ : List[Any]=5_0267 , snake_case_ : Any=50_0000 , snake_case_ : str=768 , snake_case_ : int=256 , snake_case_ : str=12 , snake_case_ : int=12 , snake_case_ : Dict=3072 , snake_case_ : Optional[Any]="gelu" , snake_case_ : Dict=0.1 , snake_case_ : List[str]=0.1 , snake_case_ : int=512 , snake_case_ : Dict=2 , snake_case_ : List[Any]=0.02 , snake_case_ : int=1e-12 , snake_case_ : Union[str, Any]=True , snake_case_ : Union[str, Any]=None , snake_case_ : Dict=1 , snake_case_ : Optional[int]=0 , snake_case_ : List[str]=2 , **snake_case_ : Union[str, Any] , ):
super().__init__(pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ )
UpperCamelCase_: Dict = vocab_size
UpperCamelCase_: Tuple = entity_vocab_size
UpperCamelCase_: Optional[int] = hidden_size
UpperCamelCase_: Any = entity_emb_size
UpperCamelCase_: str = num_hidden_layers
UpperCamelCase_: Union[str, Any] = num_attention_heads
UpperCamelCase_: Dict = hidden_act
UpperCamelCase_: Dict = intermediate_size
UpperCamelCase_: str = hidden_dropout_prob
UpperCamelCase_: List[str] = attention_probs_dropout_prob
UpperCamelCase_: int = max_position_embeddings
UpperCamelCase_: int = type_vocab_size
UpperCamelCase_: List[Any] = initializer_range
UpperCamelCase_: Union[str, Any] = layer_norm_eps
UpperCamelCase_: Tuple = use_entity_aware_attention
UpperCamelCase_: int = classifier_dropout
| 223 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase : Optional[Any] = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : int = [
"""SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwinForImageClassification""",
"""SwinForMaskedImageModeling""",
"""SwinModel""",
"""SwinPreTrainedModel""",
"""SwinBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[str] = [
"""TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSwinForImageClassification""",
"""TFSwinForMaskedImageModeling""",
"""TFSwinModel""",
"""TFSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 95 |
'''simple docstring'''
def _A ( lowercase__ , lowercase__ ):
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
lowercase__ = str(bin(lowercase__ ) )[2:] # remove the leading "0b"
lowercase__ = str(bin(lowercase__ ) )[2:]
lowercase__ = max(len(lowercase__ ) , len(lowercase__ ) )
return "0b" + "".join(
str(int("""1""" in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(lowercase__ ) , b_binary.zfill(lowercase__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 164 | 0 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
_snake_case : Any = set()
# Replace all the whitespace in our sentence
_snake_case : List[Any] = input_str.replace(""" """ , """""" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(lowerCamelCase_ ) == 26
def UpperCAmelCase__ (snake_case__ : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
_snake_case : List[str] = [False] * 26
for char in input_str:
if char.islower():
_snake_case : Union[str, Any] = True
elif char.isupper():
_snake_case : str = True
return all(lowerCamelCase_ )
def UpperCAmelCase__ (snake_case__ : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def UpperCAmelCase__ ():
"""simple docstring"""
from timeit import timeit
_snake_case : Optional[Any] = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit("""is_pangram()""" , setup=lowerCamelCase_ ) )
print(timeit("""is_pangram_faster()""" , setup=lowerCamelCase_ ) )
print(timeit("""is_pangram_fastest()""" , setup=lowerCamelCase_ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 356 |
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class lowercase( nn.Module ):
'''simple docstring'''
def __init__( self: Optional[int] ):
'''simple docstring'''
super().__init__()
_snake_case : List[str] = nn.Linear(3, 4 )
_snake_case : int = nn.BatchNormad(4 )
_snake_case : List[str] = nn.Linear(4, 5 )
def UpperCamelCase_ ( self: Any, a_: Union[str, Any] ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(a_ ) ) )
class lowercase( __a ):
'''simple docstring'''
def UpperCamelCase_ ( self: Any, a_: int, *a_: Dict, **a_: Dict ):
'''simple docstring'''
return (args[0] + 1,) + args[1:], kwargs
class lowercase( __a ):
'''simple docstring'''
def UpperCamelCase_ ( self: str, a_: Tuple, a_: Union[str, Any] ):
'''simple docstring'''
return output + 1
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = ModelForTest()
_snake_case : List[str] = ModelHook()
add_hook_to_module(a_, a_ )
self.assertEqual(test_model._hf_hook, a_ )
self.assertTrue(hasattr(a_, """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__, """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ), ["""x"""] )
remove_hook_from_module(a_ )
self.assertFalse(hasattr(a_, """_hf_hook""" ) )
self.assertFalse(hasattr(a_, """_old_forward""" ) )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Dict = ModelForTest()
_snake_case : List[Any] = ModelHook()
add_hook_to_module(a_, a_ )
add_hook_to_module(a_, a_, append=a_ )
self.assertEqual(isinstance(test_model._hf_hook, a_ ), a_ )
self.assertEqual(len(test_model._hf_hook.hooks ), 2 )
self.assertTrue(hasattr(a_, """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__, """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ), ["""x"""] )
remove_hook_from_module(a_ )
self.assertFalse(hasattr(a_, """_hf_hook""" ) )
self.assertFalse(hasattr(a_, """_old_forward""" ) )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Optional[Any] = ModelForTest()
_snake_case : Optional[Any] = torch.randn(2, 3 )
_snake_case : List[Any] = test_model(x + 1 )
_snake_case : List[str] = test_model(x + 2 )
_snake_case : Any = PreForwardHook()
add_hook_to_module(a_, a_ )
_snake_case : List[Any] = test_model(a_ )
self.assertTrue(torch.allclose(a_, a_, atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_snake_case : List[str] = PreForwardHook()
add_hook_to_module(a_, a_ )
_snake_case : Tuple = test_model(a_ )
self.assertTrue(torch.allclose(a_, a_, atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_snake_case : str = SequentialHook(PreForwardHook(), PreForwardHook() )
add_hook_to_module(a_, a_ )
_snake_case : str = test_model(a_ )
assert torch.allclose(a_, a_, atol=1E-5 )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Optional[int] = ModelForTest()
_snake_case : List[Any] = torch.randn(2, 3 )
_snake_case : List[str] = test_model(a_ )
_snake_case : List[Any] = PostForwardHook()
add_hook_to_module(a_, a_ )
_snake_case : Union[str, Any] = test_model(a_ )
self.assertTrue(torch.allclose(a_, output + 1, atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_snake_case : Tuple = PostForwardHook()
add_hook_to_module(a_, a_ )
_snake_case : Optional[Any] = test_model(a_ )
self.assertTrue(torch.allclose(a_, output + 1, atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_snake_case : Dict = SequentialHook(PostForwardHook(), PostForwardHook() )
add_hook_to_module(a_, a_ )
_snake_case : List[str] = test_model(a_ )
assert torch.allclose(a_, output + 2, atol=1E-5 )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : str = ModelForTest()
_snake_case : Any = torch.randn(2, 3 )
_snake_case : List[str] = test_model(a_ )
_snake_case : List[Any] = PostForwardHook()
add_hook_to_module(a_, a_ )
_snake_case : Dict = test_model(a_ )
self.assertTrue(torch.allclose(a_, output + 1 ) )
self.assertTrue(outputa.requires_grad )
_snake_case : Union[str, Any] = True
_snake_case : Dict = test_model(a_ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Dict = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara, AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara, AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device, torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device, torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device, torch.device(0 ) )
self.assertEqual(model.lineara.weight.device, torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_snake_case : Any = torch.randn(2, 3 )
_snake_case : Any = model(a_ )
self.assertEqual(output.device, torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(a_, AlignDevicesHook(io_same_device=a_ ) )
_snake_case : int = torch.randn(2, 3 ).to(0 )
_snake_case : Optional[Any] = model(a_ )
self.assertEqual(output.device, torch.device(0 ) )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# This will move each submodule on different devices
_snake_case : Optional[int] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara, AlignDevicesHook(**a_ ) )
add_hook_to_module(model.batchnorm, AlignDevicesHook(**a_ ) )
add_hook_to_module(model.lineara, AlignDevicesHook(**a_ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_snake_case : Union[str, Any] = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device, a_ )
_snake_case : str = torch.randn(2, 3 )
_snake_case : Optional[int] = model(a_ )
self.assertEqual(output.device, a_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# Now test with buffers included in the offload
_snake_case : Any = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara, AlignDevicesHook(**a_ ) )
add_hook_to_module(model.batchnorm, AlignDevicesHook(**a_ ) )
add_hook_to_module(model.lineara, AlignDevicesHook(**a_ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device, torch.device("""meta""" ) )
_snake_case : List[Any] = torch.randn(2, 3 )
_snake_case : Any = model(a_ )
self.assertEqual(output.device, a_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# This will move each submodule on different devices
_snake_case : Tuple = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(a_, execution_device=a_, offload=a_ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_snake_case : Optional[int] = torch.device(a_ )
self.assertEqual(model.batchnorm.running_mean.device, a_ )
_snake_case : List[Any] = torch.randn(2, 3 )
_snake_case : List[Any] = model(a_ )
self.assertEqual(output.device, a_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a_ )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(a_, execution_device=a_, offload=a_, offload_buffers=a_ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device, torch.device("""meta""" ) )
_snake_case : str = torch.randn(2, 3 )
_snake_case : List[Any] = model(a_ )
self.assertEqual(output.device, a_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a_ )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : List[str] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# This will move each submodule on different devices
_snake_case : Any = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
a_, execution_device=a_, offload=a_, weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_snake_case : Optional[Any] = torch.device(a_ )
self.assertEqual(model.batchnorm.running_mean.device, a_ )
_snake_case : int = torch.randn(2, 3 )
_snake_case : str = model(a_ )
self.assertEqual(output.device, a_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a_ )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
a_, execution_device=a_, offload=a_, weights_map=model.state_dict(), offload_buffers=a_, )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device, torch.device("""meta""" ) )
_snake_case : Optional[int] = torch.randn(2, 3 )
_snake_case : Any = model(a_ )
self.assertEqual(output.device, a_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a_ )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
| 132 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
snake_case_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
A_ : Dict = ["""pixel_values"""]
def __init__(self : Tuple , a__ : Dict = True , a__ : Any = None , a__ : str = PILImageResampling.BILINEAR , a__ : int = True , a__ : Optional[Any] = 1 / 255 , a__ : Tuple = True , a__ : List[str] = None , a__ : List[str] = True , **a__ : Tuple , ):
"""simple docstring"""
super().__init__(**_a )
__snake_case = size if size is not None else {"""shortest_edge""": 224}
__snake_case = get_size_dict(_a , default_to_square=_a )
__snake_case = crop_size if crop_size is not None else {"""height""": 256, """width""": 256}
__snake_case = get_size_dict(_a , param_name='''crop_size''' )
__snake_case = do_resize
__snake_case = size
__snake_case = resample
__snake_case = do_rescale
__snake_case = rescale_factor
__snake_case = do_center_crop
__snake_case = crop_size
__snake_case = do_flip_channel_order
def a (self : List[str] , a__ : Any , a__ : Dict , a__ : int = PIL.Image.BILINEAR , a__ : Union[str, Any] = None , **a__ : List[Any] , ):
"""simple docstring"""
__snake_case = get_size_dict(_a , default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
__snake_case = get_resize_output_image_size(_a , size=size['''shortest_edge'''] , default_to_square=_a )
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def a (self : List[str] , a__ : Tuple , a__ : Tuple , a__ : str = None , **a__ : Optional[Any] , ):
"""simple docstring"""
__snake_case = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(_a , size=(size['''height'''], size['''width''']) , data_format=_a , **_a )
def a (self : List[str] , a__ : Any , a__ : Dict , a__ : List[Any] = None , **a__ : List[str] , ):
"""simple docstring"""
return rescale(_a , scale=_a , data_format=_a , **_a )
def a (self : List[str] , a__ : str , a__ : List[Any] = None ):
"""simple docstring"""
return flip_channel_order(_a , data_format=_a )
def a (self : int , a__ : Optional[int] , a__ : Dict = None , a__ : Any = None , a__ : Tuple = None , a__ : str = None , a__ : List[Any] = None , a__ : Any = None , a__ : Tuple = None , a__ : Tuple = None , a__ : Any = None , a__ : Optional[int] = ChannelDimension.FIRST , **a__ : int , ):
"""simple docstring"""
__snake_case = do_resize if do_resize is not None else self.do_resize
__snake_case = resample if resample is not None else self.resample
__snake_case = do_rescale if do_rescale is not None else self.do_rescale
__snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
__snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop
__snake_case = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
__snake_case = size if size is not None else self.size
__snake_case = get_size_dict(_a , default_to_square=_a )
__snake_case = crop_size if crop_size is not None else self.crop_size
__snake_case = get_size_dict(_a , param_name='''crop_size''' )
__snake_case = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
# All transformations expect numpy arrays.
__snake_case = [to_numpy_array(_a ) for image in images]
if do_resize:
__snake_case = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_center_crop:
__snake_case = [self.center_crop(image=_a , size=_a ) for image in images]
if do_rescale:
__snake_case = [self.rescale(image=_a , scale=_a ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
__snake_case = [self.flip_channel_order(image=_a ) for image in images]
__snake_case = [to_channel_dimension_format(_a , _a ) for image in images]
__snake_case = {"""pixel_values""": images}
return BatchFeature(data=_a , tensor_type=_a )
def a (self : int , a__ : List[Any] , a__ : Optional[int] = None ):
"""simple docstring"""
__snake_case = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_a ) != len(_a ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_a ):
__snake_case = target_sizes.numpy()
__snake_case = []
for idx in range(len(_a ) ):
__snake_case = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_a )
__snake_case = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_a )
else:
__snake_case = logits.argmax(dim=1 )
__snake_case = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 24 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class __a :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=2 , _a=24 , _a=16 , _a=True , _a=True , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , _a=None , _a=2 , _a=2 , ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = parent
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE__ : str = max_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_mel_bins
SCREAMING_SNAKE_CASE__ : Optional[Any] = is_training
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE__ : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE__ : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE__ : int = intermediate_size
SCREAMING_SNAKE_CASE__ : Tuple = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Tuple = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = scope
SCREAMING_SNAKE_CASE__ : List[str] = frequency_stride
SCREAMING_SNAKE_CASE__ : Union[str, Any] = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
SCREAMING_SNAKE_CASE__ : Optional[int] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
SCREAMING_SNAKE_CASE__ : Any = (self.max_length - self.patch_size) // self.time_stride + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = frequency_out_dimension * time_out_dimension
SCREAMING_SNAKE_CASE__ : Any = num_patches + 2
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
SCREAMING_SNAKE_CASE__ : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_config()
return config, input_values, labels
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def _a ( self , _a , _a , _a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = ASTModel(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) ,
) : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""input_values""": input_values}
return config, inputs_dict
@require_torch
class __a (UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[Any] = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE :Dict = (
{"""audio-classification""": ASTForAudioClassification, """feature-extraction""": ASTModel}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE :Union[str, Any] = False
_SCREAMING_SNAKE_CASE :Any = False
_SCREAMING_SNAKE_CASE :Union[str, Any] = False
_SCREAMING_SNAKE_CASE :Tuple = False
def _a ( self , _a , _a , _a , _a , _a ) -> Dict:
"""simple docstring"""
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = ASTModelTester(self )
SCREAMING_SNAKE_CASE__ : str = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def _a ( self ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""AST does not use inputs_embeds""" )
def _a ( self ) -> List[str]:
"""simple docstring"""
pass
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Any = model_class(_a )
SCREAMING_SNAKE_CASE__ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : Dict = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Dict = ["""input_values"""]
self.assertListEqual(arg_names[:1] , _a )
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
@slow
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ASTModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def _lowercase ( ) -> int:
SCREAMING_SNAKE_CASE__ : List[Any] = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = torchaudio.load(__lowerCAmelCase )
return audio, sampling_rate
@require_torch
@require_torchaudio
class __a (unittest.TestCase):
'''simple docstring'''
@cached_property
def _a ( self ) -> int:
"""simple docstring"""
return (
ASTFeatureExtractor.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" )
if is_torchaudio_available()
else None
)
@slow
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.default_feature_extractor
SCREAMING_SNAKE_CASE__ : Optional[Any] = ASTForAudioClassification.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" ).to(_a )
SCREAMING_SNAKE_CASE__ : Dict = self.default_feature_extractor
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_audio()
SCREAMING_SNAKE_CASE__ : List[str] = audio.squeeze().numpy()
SCREAMING_SNAKE_CASE__ : List[str] = feature_extractor(_a , sampling_rate=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(**_a )
# verify the logits
SCREAMING_SNAKE_CASE__ : List[Any] = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , _a )
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor([-0.8_760, -7.0_042, -8.6_602] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
| 132 | 0 |
a : Tuple = 'Input must be a string of 8 numbers plus letter'
a : Dict = 'TRWAGMYFPDXBNJZSQVHLCKE'
def lowerCAmelCase_ (lowerCAmelCase__: str ):
"""simple docstring"""
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_: List[Any] = F'Expected string as input, found {type(lowerCAmelCase__ ).__name__}'
raise TypeError(lowerCAmelCase__ )
UpperCAmelCase_: List[str] = spanish_id.replace("""-""" , """""" ).upper()
if len(lowerCAmelCase__ ) != 9:
raise ValueError(lowerCAmelCase__ )
try:
UpperCAmelCase_: Union[str, Any] = int(spanish_id_clean[0:8] )
UpperCAmelCase_: int = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(lowerCAmelCase__ ) from ex
if letter.isdigit():
raise ValueError(lowerCAmelCase__ )
return letter == LOOKUP_LETTERS[number % 2_3]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Tuple = logging.get_logger(__name__)
a : Optional[Any] = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class _a ( _lowerCAmelCase ):
A = '''encodec'''
def __init__(self, SCREAMING_SNAKE_CASE_=[1.5, 3.0, 6.0, 1_2.0, 2_4.0], SCREAMING_SNAKE_CASE_=24000, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=128, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=[8, 5, 4, 2], SCREAMING_SNAKE_CASE_="weight_norm", SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_="reflect", SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=1.0, SCREAMING_SNAKE_CASE_=1024, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=True, **SCREAMING_SNAKE_CASE_, ) -> Optional[int]:
UpperCAmelCase_: List[Any] = target_bandwidths
UpperCAmelCase_: str = sampling_rate
UpperCAmelCase_: Any = audio_channels
UpperCAmelCase_: List[str] = normalize
UpperCAmelCase_: List[Any] = chunk_length_s
UpperCAmelCase_: List[Any] = overlap
UpperCAmelCase_: Any = hidden_size
UpperCAmelCase_: str = num_filters
UpperCAmelCase_: Any = num_residual_layers
UpperCAmelCase_: int = upsampling_ratios
UpperCAmelCase_: Tuple = norm_type
UpperCAmelCase_: Union[str, Any] = kernel_size
UpperCAmelCase_: str = last_kernel_size
UpperCAmelCase_: Union[str, Any] = residual_kernel_size
UpperCAmelCase_: str = dilation_growth_rate
UpperCAmelCase_: int = use_causal_conv
UpperCAmelCase_: int = pad_mode
UpperCAmelCase_: List[Any] = compress
UpperCAmelCase_: Dict = num_lstm_layers
UpperCAmelCase_: List[Any] = trim_right_ratio
UpperCAmelCase_: List[Any] = codebook_size
UpperCAmelCase_: List[Any] = codebook_dim if codebook_dim is not None else hidden_size
UpperCAmelCase_: Optional[Any] = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}' )
super().__init__(**SCREAMING_SNAKE_CASE_ )
@property
def __snake_case (self ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __snake_case (self ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1, int((1.0 - self.overlap) * self.chunk_length ) )
@property
def __snake_case (self ) -> int:
UpperCAmelCase_: Optional[int] = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def __snake_case (self ) -> int:
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 82 | 1 |
from math import ceil
def A ( _lowerCamelCase = 1_001 ):
'''simple docstring'''
_lowerCAmelCase : int = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
_lowerCAmelCase : List[Any] = 2 * i + 1
_lowerCAmelCase : str = 2 * i
_lowerCAmelCase : List[str] = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
_snake_case = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number")
| 36 |
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
if exponent == 1:
return base
if exponent % 2 == 0:
UpperCAmelCase = _modexpt(UpperCamelCase__ , exponent // 2 , UpperCamelCase__ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(UpperCamelCase__ , exponent - 1 , UpperCamelCase__ )) % modulo_value
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ = 1777 , UpperCamelCase__ = 1855 , UpperCamelCase__ = 8 ) -> int:
'''simple docstring'''
UpperCAmelCase = base
for _ in range(1 , UpperCamelCase__ ):
UpperCAmelCase = _modexpt(UpperCamelCase__ , UpperCamelCase__ , 10**digits )
return result
if __name__ == "__main__":
print(F'{solution() = }')
| 273 | 0 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def lowercase__ ( snake_case_ :Optional[Any] , snake_case_ :int , snake_case_ :Union[str, Any] , snake_case_ :List[str]=5 ):
# Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
assert masked_input.count('''<mask>''' ) == 1
__UpperCAmelCase = torch.tensor(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) ).unsqueeze(0 ) # Batch size 1
__UpperCAmelCase = model(snake_case_ )[0] # The last hidden-state is the first element of the output tuple
__UpperCAmelCase = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
__UpperCAmelCase = logits[0, masked_index, :]
__UpperCAmelCase = logits.softmax(dim=0 )
__UpperCAmelCase , __UpperCAmelCase = prob.topk(k=snake_case_ , dim=0 )
__UpperCAmelCase = ''' '''.join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(snake_case_ ) )] )
__UpperCAmelCase = tokenizer.mask_token
__UpperCAmelCase = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''' ) ):
__UpperCAmelCase = predicted_token_bpe.replace('''\u2581''' , ''' ''' )
if " {0}".format(snake_case_ ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(''' {0}'''.format(snake_case_ ) , snake_case_ ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(snake_case_ , snake_case_ ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
_lowercase : Any = CamembertTokenizer.from_pretrained('camembert-base')
_lowercase : Optional[Any] = CamembertForMaskedLM.from_pretrained('camembert-base')
model.eval()
_lowercase : int = 'Le camembert est <mask> :)'
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 86 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
_lowercase : List[Any] = logging.get_logger(__name__)
_lowercase : int = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Optional[Any] = "bloom"
a__ : List[Any] = ["past_key_values"]
a__ : Optional[Any] = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self : Union[str, Any] , _lowercase : Dict=25_08_80 , _lowercase : str=64 , _lowercase : int=2 , _lowercase : Union[str, Any]=8 , _lowercase : Optional[Any]=1E-5 , _lowercase : Dict=0.02 , _lowercase : Optional[int]=True , _lowercase : Any=1 , _lowercase : Dict=2 , _lowercase : Optional[Any]=False , _lowercase : Union[str, Any]=0.0 , _lowercase : str=0.0 , _lowercase : str=1 , _lowercase : int=False , **_lowercase : List[str] , ):
__UpperCAmelCase = vocab_size
# Backward compatibility with n_embed kwarg
__UpperCAmelCase = kwargs.pop('''n_embed''' , _lowercase )
__UpperCAmelCase = hidden_size if n_embed is None else n_embed
__UpperCAmelCase = n_layer
__UpperCAmelCase = n_head
__UpperCAmelCase = layer_norm_epsilon
__UpperCAmelCase = initializer_range
__UpperCAmelCase = use_cache
__UpperCAmelCase = pretraining_tp
__UpperCAmelCase = apply_residual_connection_post_layernorm
__UpperCAmelCase = hidden_dropout
__UpperCAmelCase = attention_dropout
__UpperCAmelCase = bos_token_id
__UpperCAmelCase = eos_token_id
__UpperCAmelCase = slow_but_exact
super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : List[str] = version.parse("1.12" )
def __init__( self : Optional[int] , _lowercase : PretrainedConfig , _lowercase : str = "default" , _lowercase : List[PatchingSpec] = None , _lowercase : bool = False , ):
super().__init__(_lowercase , task=_lowercase , patching_specs=_lowercase , use_past=_lowercase )
if not getattr(self._config , '''pad_token_id''' , _lowercase ):
# TODO: how to do that better?
__UpperCAmelCase = 0
@property
def a ( self : Optional[int] ):
__UpperCAmelCase = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(_lowercase , direction='''inputs''' , inverted_values_shape=_lowercase )
__UpperCAmelCase = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
__UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def a ( self : Any ):
return self._config.n_layer
@property
def a ( self : Tuple ):
return self._config.n_head
@property
def a ( self : Dict ):
return 1E-3
def a ( self : List[str] , _lowercase : "PreTrainedTokenizer" , _lowercase : int = -1 , _lowercase : int = -1 , _lowercase : bool = False , _lowercase : Optional["TensorType"] = None , ):
__UpperCAmelCase = super(_lowercase , self ).generate_dummy_inputs(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
# We need to order the input in the way they appears in the forward()
__UpperCAmelCase = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__UpperCAmelCase , __UpperCAmelCase = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__UpperCAmelCase = seqlen + 2
__UpperCAmelCase = self._config.hidden_size // self.num_attention_heads
__UpperCAmelCase = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
__UpperCAmelCase = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
__UpperCAmelCase = [
(torch.zeros(_lowercase ), torch.zeros(_lowercase )) for _ in range(self.num_layers )
]
__UpperCAmelCase = common_inputs['''attention_mask''']
if self.use_past:
__UpperCAmelCase = ordered_inputs['''attention_mask'''].dtype
__UpperCAmelCase = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(_lowercase , _lowercase , dtype=_lowercase )] , dim=1 )
return ordered_inputs
@property
def a ( self : Any ):
return 13
| 86 | 1 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any]):
if isinstance(_lowerCamelCase , torch.Tensor):
return image
elif isinstance(_lowerCamelCase , PIL.Image.Image):
lowercase__ : List[str] = [image]
if isinstance(image[0] , PIL.Image.Image):
lowercase__ : Any = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
lowercase__ : Tuple = np.concatenate(_lowerCamelCase , axis=0)
lowercase__ : Any = np.array(_lowerCamelCase).astype(np.floataa) / 255.0
lowercase__ : List[Any] = image.transpose(0 , 3 , 1 , 2)
lowercase__ : Tuple = 2.0 * image - 1.0
lowercase__ : Optional[int] = torch.from_numpy(_lowerCamelCase)
elif isinstance(image[0] , torch.Tensor):
lowercase__ : Any = torch.cat(_lowerCamelCase , dim=0)
return image
def lowercase_ ( _lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] , _lowerCamelCase : str=0.9995):
if not isinstance(_lowerCamelCase , np.ndarray):
lowercase__ : List[str] = True
lowercase__ : Tuple = va.device
lowercase__ : Union[str, Any] = va.cpu().numpy()
lowercase__ : str = va.cpu().numpy()
lowercase__ : Tuple = np.sum(va * va / (np.linalg.norm(_lowerCamelCase) * np.linalg.norm(_lowerCamelCase)))
if np.abs(_lowerCamelCase) > DOT_THRESHOLD:
lowercase__ : Any = (1 - t) * va + t * va
else:
lowercase__ : Any = np.arccos(_lowerCamelCase)
lowercase__ : List[Any] = np.sin(_lowerCamelCase)
lowercase__ : List[Any] = theta_a * t
lowercase__ : Optional[Any] = np.sin(_lowerCamelCase)
lowercase__ : List[Any] = np.sin(theta_a - theta_t) / sin_theta_a
lowercase__ : Dict = sin_theta_t / sin_theta_a
lowercase__ : List[Any] = sa * va + sa * va
if inputs_are_torch:
lowercase__ : Tuple = torch.from_numpy(_lowerCamelCase).to(_lowerCamelCase)
return va
def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any]):
lowercase__ : Any = F.normalize(_lowerCamelCase , dim=-1)
lowercase__ : Optional[Any] = F.normalize(_lowerCamelCase , dim=-1)
return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int):
for param in model.parameters():
lowercase__ : str = value
class snake_case_ ( __A ):
def __init__( self : str , lowercase_ : AutoencoderKL , lowercase_ : CLIPTextModel , lowercase_ : CLIPModel , lowercase_ : CLIPTokenizer , lowercase_ : UNetaDConditionModel , lowercase_ : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , lowercase_ : CLIPFeatureExtractor , lowercase_ : Dict=None , lowercase_ : Tuple=None , lowercase_ : Tuple=None , ) -> Union[str, Any]:
super().__init__()
self.register_modules(
vae=lowercase_ , text_encoder=lowercase_ , clip_model=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , feature_extractor=lowercase_ , coca_model=lowercase_ , coca_tokenizer=lowercase_ , coca_transform=lowercase_ , )
lowercase__ : int = (
feature_extractor.size
if isinstance(feature_extractor.size , lowercase_ )
else feature_extractor.size["shortest_edge"]
)
lowercase__ : Any = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , lowercase_ )
set_requires_grad(self.clip_model , lowercase_ )
def __UpperCamelCase ( self : Any , lowercase_ : Optional[Union[str, int]] = "auto" ) -> int:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase__ : int = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_ )
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
self.enable_attention_slicing(lowercase_ )
def __UpperCamelCase ( self : Tuple ) -> int:
set_requires_grad(self.vae , lowercase_ )
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
set_requires_grad(self.vae , lowercase_ )
def __UpperCamelCase ( self : Dict ) -> Optional[Any]:
set_requires_grad(self.unet , lowercase_ )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
set_requires_grad(self.unet , lowercase_ )
def __UpperCamelCase ( self : Dict , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Dict ) -> List[Any]:
# get the original timestep using init_timestep
lowercase__ : Any = min(int(num_inference_steps * strength ) , lowercase_ )
lowercase__ : Optional[int] = max(num_inference_steps - init_timestep , 0 )
lowercase__ : List[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __UpperCamelCase ( self : Optional[int] , lowercase_ : Any , lowercase_ : Any , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : Tuple=None ) -> Union[str, Any]:
if not isinstance(lowercase_ , torch.Tensor ):
raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(lowercase_ )}''' )
lowercase__ : int = image.to(device=lowercase_ , dtype=lowercase_ )
if isinstance(lowercase_ , lowercase_ ):
lowercase__ : Tuple = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowercase_ )
]
lowercase__ : Any = torch.cat(lowercase_ , dim=0 )
else:
lowercase__ : List[str] = self.vae.encode(lowercase_ ).latent_dist.sample(lowercase_ )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowercase__ : Union[str, Any] = 0.1_82_15 * init_latents
lowercase__ : Optional[Any] = init_latents.repeat_interleave(lowercase_ , dim=0 )
lowercase__ : int = randn_tensor(init_latents.shape , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
# get latents
lowercase__ : str = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ )
lowercase__ : Optional[Any] = init_latents
return latents
def __UpperCamelCase ( self : Dict , lowercase_ : Tuple ) -> List[str]:
lowercase__ : int = self.coca_transform(lowercase_ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
lowercase__ : int = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
lowercase__ : List[Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," )
def __UpperCamelCase ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : Any ) -> int:
lowercase__ : Optional[int] = self.feature_extractor.preprocess(lowercase_ )
lowercase__ : List[Any] = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
lowercase__ : int = self.clip_model.get_image_features(lowercase_ )
lowercase__ : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowercase_ )
lowercase__ : Tuple = image_embeddings_clip.repeat_interleave(lowercase_ , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def __UpperCamelCase ( self : List[Any] , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int , ) -> Optional[int]:
lowercase__ : Dict = latents.detach().requires_grad_()
lowercase__ : List[Any] = self.scheduler.scale_model_input(lowercase_ , lowercase_ )
# predict the noise residual
lowercase__ : Optional[Any] = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_ ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
lowercase__ : Optional[int] = self.scheduler.alphas_cumprod[timestep]
lowercase__ : Tuple = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowercase__ : Union[str, Any] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
lowercase__ : Optional[Any] = torch.sqrt(lowercase_ )
lowercase__ : List[str] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , lowercase_ ):
lowercase__ : Tuple = self.scheduler.sigmas[index]
lowercase__ : Any = latents - sigma * noise_pred
else:
raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowercase__ : Dict = 1 / 0.1_82_15 * sample
lowercase__ : Optional[Any] = self.vae.decode(lowercase_ ).sample
lowercase__ : Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 )
lowercase__ : List[str] = transforms.Resize(self.feature_extractor_size )(lowercase_ )
lowercase__ : Optional[Any] = self.normalize(lowercase_ ).to(latents.dtype )
lowercase__ : List[Any] = self.clip_model.get_image_features(lowercase_ )
lowercase__ : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowercase_ )
lowercase__ : int = spherical_dist_loss(lowercase_ , lowercase_ ).mean() * clip_guidance_scale
lowercase__ : int = -torch.autograd.grad(lowercase_ , lowercase_ )[0]
if isinstance(self.scheduler , lowercase_ ):
lowercase__ : Optional[int] = latents.detach() + grads * (sigma**2)
lowercase__ : Optional[int] = noise_pred_original
else:
lowercase__ : Union[str, Any] = noise_pred_original - torch.sqrt(lowercase_ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : Tuple , lowercase_ : Union[torch.FloatTensor, PIL.Image.Image] , lowercase_ : Union[torch.FloatTensor, PIL.Image.Image] , lowercase_ : Optional[str] = None , lowercase_ : Optional[str] = None , lowercase_ : Optional[int] = 5_12 , lowercase_ : Optional[int] = 5_12 , lowercase_ : float = 0.6 , lowercase_ : Optional[int] = 50 , lowercase_ : Optional[float] = 7.5 , lowercase_ : Optional[int] = 1 , lowercase_ : float = 0.0 , lowercase_ : Optional[float] = 1_00 , lowercase_ : Optional[torch.Generator] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , lowercase_ : float = 0.8 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , ) -> Dict:
if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(lowercase_ )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(lowercase_ , torch.Generator ) and batch_size > 1:
lowercase__ : List[str] = [generator] + [None] * (batch_size - 1)
lowercase__ : str = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
lowercase__ : Dict = [x[0] for x in coca_is_none if x[1]]
lowercase__ : int = ", ".join(lowercase_ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(lowercase_ ):
raise ValueError(
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
lowercase__ : Dict = self.get_image_description(lowercase_ )
if style_prompt is None:
if len(lowercase_ ):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
lowercase__ : Optional[int] = self.get_image_description(lowercase_ )
# get prompt text embeddings for content and style
lowercase__ : Tuple = self.tokenizer(
lowercase_ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=lowercase_ , return_tensors="pt" , )
lowercase__ : int = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
lowercase__ : List[str] = self.tokenizer(
lowercase_ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=lowercase_ , return_tensors="pt" , )
lowercase__ : Optional[Any] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
lowercase__ : int = slerp(lowercase_ , lowercase_ , lowercase_ )
# duplicate text embeddings for each generation per prompt
lowercase__ : Any = text_embeddings.repeat_interleave(lowercase_ , dim=0 )
# set timesteps
lowercase__ : Optional[int] = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
lowercase__ : Optional[Any] = {}
if accepts_offset:
lowercase__ : Optional[Any] = 1
self.scheduler.set_timesteps(lowercase_ , **lowercase_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
lowercase__ , lowercase__ : Optional[int] = self.get_timesteps(lowercase_ , lowercase_ , self.device )
lowercase__ : str = timesteps[:1].repeat(lowercase_ )
# Preprocess image
lowercase__ : int = preprocess(lowercase_ , lowercase_ , lowercase_ )
lowercase__ : Tuple = self.prepare_latents(
lowercase_ , lowercase_ , lowercase_ , text_embeddings.dtype , self.device , lowercase_ )
lowercase__ : List[Any] = preprocess(lowercase_ , lowercase_ , lowercase_ )
lowercase__ : Union[str, Any] = self.prepare_latents(
lowercase_ , lowercase_ , lowercase_ , text_embeddings.dtype , self.device , lowercase_ )
lowercase__ : Any = slerp(lowercase_ , lowercase_ , lowercase_ )
if clip_guidance_scale > 0:
lowercase__ : Tuple = self.get_clip_image_embeddings(lowercase_ , lowercase_ )
lowercase__ : Optional[Any] = self.get_clip_image_embeddings(lowercase_ , lowercase_ )
lowercase__ : Union[str, Any] = slerp(
lowercase_ , lowercase_ , lowercase_ )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase__ : Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase__ : int = content_text_input.input_ids.shape[-1]
lowercase__ : Optional[Any] = self.tokenizer([""] , padding="max_length" , max_length=lowercase_ , return_tensors="pt" )
lowercase__ : List[str] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
lowercase__ : Union[str, Any] = uncond_embeddings.repeat_interleave(lowercase_ , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ : Tuple = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase__ : Union[str, Any] = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
lowercase__ : Union[str, Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
lowercase__ : str = torch.randn(lowercase_ , generator=lowercase_ , device="cpu" , dtype=lowercase_ ).to(
self.device )
else:
lowercase__ : Tuple = torch.randn(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
lowercase__ : List[Any] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ : List[Any] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ : List[str] = {}
if accepts_eta:
lowercase__ : List[str] = eta
# check if the scheduler accepts generator
lowercase__ : Optional[Any] = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
lowercase__ : Optional[int] = generator
with self.progress_bar(total=lowercase_ ):
for i, t in enumerate(lowercase_ ):
# expand the latents if we are doing classifier free guidance
lowercase__ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ : List[Any] = self.scheduler.scale_model_input(lowercase_ , lowercase_ )
# predict the noise residual
lowercase__ : Tuple = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
lowercase__ , lowercase__ : int = noise_pred.chunk(2 )
lowercase__ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
lowercase__ : Optional[Any] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
lowercase__ , lowercase__ : Dict = self.cond_fn(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , )
# compute the previous noisy sample x_t -> x_t-1
lowercase__ : Tuple = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowercase__ : Union[str, Any] = 1 / 0.1_82_15 * latents
lowercase__ : Optional[Any] = self.vae.decode(lowercase_ ).sample
lowercase__ : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
lowercase__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase__ : Optional[int] = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=lowercase_ , nsfw_content_detected=lowercase_ )
| 87 | """simple docstring"""
import os
import sys
import unittest
lowerCAmelCase__ : Tuple = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCAmelCase__ : Tuple = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
lowerCAmelCase__ : Optional[Any] = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = get_test_to_tester_mapping(lowerCamelCase__ )
UpperCAmelCase__ = get_test_to_tester_mapping(lowerCamelCase__ )
UpperCAmelCase__ = {'BertModelTest': 'BertModelTester'}
UpperCAmelCase__ = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = get_model_to_test_mapping(lowerCamelCase__ )
UpperCAmelCase__ = get_model_to_test_mapping(lowerCamelCase__ )
UpperCAmelCase__ = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
UpperCAmelCase__ = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
def __lowerCAmelCase ( self : int ):
UpperCAmelCase__ = get_model_to_tester_mapping(lowerCamelCase__ )
UpperCAmelCase__ = get_model_to_tester_mapping(lowerCamelCase__ )
UpperCAmelCase__ = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
UpperCAmelCase__ = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
| 98 | 0 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=24 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=1000 , ) -> int:
snake_case_ : Union[str, Any] = parent
snake_case_ : Dict = batch_size
snake_case_ : Dict = seq_length
snake_case_ : Optional[Any] = is_training
snake_case_ : List[str] = use_input_mask
snake_case_ : List[str] = use_token_type_ids
snake_case_ : str = use_labels
snake_case_ : List[str] = vocab_size
snake_case_ : str = hidden_size
snake_case_ : Tuple = num_hidden_layers
snake_case_ : Dict = num_attention_heads
snake_case_ : List[Any] = intermediate_size
snake_case_ : List[str] = hidden_act
snake_case_ : Optional[Any] = hidden_dropout_prob
snake_case_ : str = attention_probs_dropout_prob
snake_case_ : Tuple = max_position_embeddings
snake_case_ : Any = type_vocab_size
snake_case_ : int = type_sequence_label_size
snake_case_ : Tuple = initializer_range
snake_case_ : List[Any] = num_labels
snake_case_ : Tuple = scope
snake_case_ : str = range_bbox
def _lowerCAmelCase ( self ) -> Dict:
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
snake_case_ : List[Any] = bbox[i, j, 3]
snake_case_ : Union[str, Any] = bbox[i, j, 1]
snake_case_ : List[str] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
snake_case_ : Optional[Any] = bbox[i, j, 2]
snake_case_ : Dict = bbox[i, j, 0]
snake_case_ : Dict = t
snake_case_ : Tuple = None
if self.use_input_mask:
snake_case_ : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
snake_case_ : Optional[Any] = None
if self.use_token_type_ids:
snake_case_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : int = None
snake_case_ : Union[str, Any] = None
if self.use_labels:
snake_case_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ : Optional[Any] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def _lowerCAmelCase ( self ) -> Union[str, Any]:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Any:
snake_case_ : Dict = LiltModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ : int = model(_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = model(_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
snake_case_ : List[str] = model(_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> List[str]:
snake_case_ : int = self.num_labels
snake_case_ : Optional[Any] = LiltForTokenClassification(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ : Any = model(
_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> List[Any]:
snake_case_ : str = LiltForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ : Optional[Any] = model(
_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : Dict = self.prepare_config_and_inputs()
(
snake_case_
) : Any = config_and_inputs
snake_case_ : Optional[Any] = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A : Optional[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
A : Tuple = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
A : Optional[Any] = False
A : Any = False
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
return True
def _lowerCAmelCase ( self ) -> str:
snake_case_ : Dict = LiltModelTester(self )
snake_case_ : Optional[Any] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def _lowerCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ) -> int:
snake_case_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ : Dict = type
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> str:
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> List[str]:
snake_case_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE )
@slow
def _lowerCAmelCase ( self ) -> Optional[Any]:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : Union[str, Any] = LiltModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@require_torch
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> str:
snake_case_ : Any = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = torch.tensor([[1, 2]] , device=_SCREAMING_SNAKE_CASE )
snake_case_ : Any = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case_ : Optional[int] = model(input_ids=_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE )
snake_case_ : str = torch.Size([1, 2, 768] )
snake_case_ : Optional[Any] = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=_SCREAMING_SNAKE_CASE , )
self.assertTrue(outputs.last_hidden_state.shape , _SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
| 350 |
lowercase : Optional[int] = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
| 36 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.