code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def a__(inductance: float, capacitance: float) -> tuple[str, float]:
    """
    Compute the resonant frequency of an LC circuit.

    f = 1 / (2 * pi * sqrt(L * C))

    :param inductance: inductance L in henries, must be > 0
    :param capacitance: capacitance C in farads, must be > 0
    :return: ("Resonant frequency", frequency in hertz)
    :raises ValueError: if inductance or capacitance is zero or negative

    NOTE(review): the obfuscated signature used the same name for both
    parameters (a SyntaxError); names restored from the body.
    """
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    if capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    return (
        "Resonant frequency",
        float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 82 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class snake_case(unittest.TestCase):
    """Integration checks that JukeboxTokenizer produces pinned token ids."""

    # NOTE(review): attribute name assumed from the standard tokenizer-test
    # convention — confirm; `metas` is grounded by `self.metas` below. The
    # obfuscated originals both used the same name and shadowed each other.
    tokenizer_class = JukeboxTokenizer
    metas = {
        "artist": "Zac Brown Band",
        "genres": "Country",
        "lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
    }

    @require_torch
    def test_1b_lyrics_tokenizer(self) -> None:
        """Tokenize `metas` with the 1b-lyrics checkpoint and compare ids."""
        import torch

        # NOTE(review): the obfuscated code bound tokenizer, tokens and the
        # expected ids to one reused name, so the asserts below referenced
        # undefined `tokens`/`EXPECTED_OUTPUT` — restored here.
        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
            torch.tensor([[
                0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
                76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
                44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
                47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
                76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
                30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
                27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
                45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
                41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
                76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
                76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
                64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
                30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
                27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
                34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
                27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
                41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
                76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
                44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
                76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
                32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
                40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
                45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
                31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
                45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
                76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
                34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
                31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
                40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
                38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
                76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
                41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
                27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
                46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
                76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
                41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
                46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
                41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
                40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
                27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
                76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
                76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
                41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
                76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
                27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
                34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
                44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
                40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
                46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
                38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
                40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
                27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
                76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
                76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
                76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
                76, 76]] ),
            torch.tensor([[0, 0, 0, 1_069, 11]] ),
            torch.tensor([[0, 0, 0, 1_069, 11]] ),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))

    @require_torch
    def test_5b_lyrics_tokenizer(self) -> None:
        """Tokenize `metas` with the 5b-lyrics checkpoint and compare ids."""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
            torch.tensor([[
                0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
                31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
                31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
                40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
                79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
                77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
                27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
                37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
                32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
                77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
                77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
                46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
                77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
                77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
                77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
                77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
                64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
                40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
                40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
                38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
                31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
                41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
                46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
                41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
                31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
                31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
                44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
                31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
                38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
                40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
                27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
                31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
                34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
                31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
                31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
                45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
                31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
                15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
                11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
                45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
                41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
                44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
                46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
                27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
                35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
                77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
                31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
                41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
                77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
                40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
                77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
                27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
                77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
                77, 77, 77, 77, 77, 77]] ),
            torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
            torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
| 267 | 0 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
_A = TypeVar('''T''')
def get_parent_position(position):
    """Return the parent index of heap slot ``position`` in an array heap.

    NOTE(review): name restored from the call sites inside the priority
    queue below; the obfuscated name collided with the sibling helpers.
    """
    return (position - 1) // 2
def get_child_left_position(position):
    """Return the left-child index of heap slot ``position`` in an array heap.

    NOTE(review): name restored from the call sites inside the priority
    queue below; the obfuscated name collided with the sibling helpers.
    """
    return (2 * position) + 1
def get_child_right_position(position):
    """Return the right-child index of heap slot ``position`` in an array heap.

    NOTE(review): name restored from the call sites inside the priority
    queue below; the obfuscated name collided with the sibling helpers.
    """
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """
    Array-backed min-heap priority queue with a position map, so that
    ``update_key`` runs in O(log n).

    NOTE(review): class and method names restored from the call sites in
    the Prim's-algorithm driver below (``MinPriorityQueue()``, ``push``,
    ``extract_min``, ``update_key``, ``is_empty``) and from the internal
    ``self._bubble_*``/``self._swap_nodes`` calls. The obfuscated source
    gave every method the same name and duplicated parameter names, which
    is a SyntaxError. Child/parent index arithmetic is inlined so the
    class does not depend on module-level helpers.
    """

    def __init__(self):
        self.heap = []          # list of (elem, weight) pairs, heap-ordered by weight
        self.position_map = {}  # elem -> index of its pair in self.heap
        self.elements = 0       # number of stored pairs

    def __len__(self):
        return self.elements

    def __repr__(self):
        return str(self.heap)

    def is_empty(self):
        """Return True when the queue holds no elements."""
        return self.elements == 0

    def push(self, elem, weight):
        """Insert ``elem`` with priority ``weight``."""
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self):
        """Remove and return the element with the smallest weight."""
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem, weight):
        """Change ``elem``'s priority to ``weight`` and restore heap order."""
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = (position - 1) // 2  # array-heap parent index
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem):
        # Move elem towards the root while it is lighter than its parent.
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = (curr_pos - 1) // 2
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem):
        # Move elem towards the leaves while some child is lighter than it.
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = 2 * curr_pos + 1
        child_right_position = 2 * curr_pos + 2
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos, node2_pos):
        # Swap two heap slots and keep the position map in sync.
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class A(Generic[T]):
    """Undirected, weighted graph stored as an adjacency map.

    NOTE(review): the obfuscated source named both methods identically (the
    second shadowed the first) and duplicated parameter names, a
    SyntaxError; ``add_node`` is restored from the internal call in
    ``add_edge``.
    """

    def __init__(self):
        self.connections = {}  # node -> {neighbour: edge weight}
        self.nodes = 0         # number of distinct nodes

    def __repr__(self):
        return str(self.connections)

    def __len__(self):
        return self.nodes

    def add_node(self, node):
        """Register ``node`` if it is not present yet."""
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1, node2, weight):
        """Add an undirected edge of the given weight between two nodes."""
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def __UpperCamelCase(_A):
    """
    Run Prim's-style relaxation over ``_A`` (a graph exposing
    ``connections: node -> {neighbour: weight}``) and return
    ``(dist, parent)`` maps.

    NOTE(review): the obfuscated source discarded the results of
    ``extract_min`` and dropped ``dist[node] = 0`` by binding everything
    to one reused local; restored from the surrounding control flow.
    """
    graph = _A
    dist = {node: maxsize for node in graph.connections}
    parent = {node: None for node in graph.connections}

    priority_queue = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization: pick an arbitrary start node and give it cost 0
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
| 325 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Lazy-import structure: maps each submodule to the public names it provides.
# NOTE(review): the obfuscated source never defined `_import_structure` and
# clobbered the config dict with the model list; restored the standard
# transformers lazy-init pattern, including installing the _LazyModule into
# sys.modules (the original bound it to a throwaway name).
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 325 | 1 |
'''simple docstring'''
from functools import reduce
# The 1000-digit number from Project Euler problem 8.
# NOTE(review): the constant is referenced as the default value `N` below;
# the obfuscated name left `N` undefined.
N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """
    Return the greatest product of thirteen adjacent digits in ``n``.

    NOTE(review): name restored from the ``solution()`` call in the
    ``__main__`` guard; the obfuscated def/parameter names left both
    ``solution`` and the ``n`` used in the body undefined.
    """
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
def lucas_lehmer_test(p: int) -> bool:
    """
    Lucas-Lehmer primality test for the Mersenne number 2**p - 1.

    Returns True iff 2**p - 1 is prime (for a prime exponent ``p``).

    NOTE(review): name and parameter restored from the calls in the
    ``__main__`` guard and from the body, which used ``p``/``s``/``m``
    that the obfuscated signature left undefined.

    :raises ValueError: if p < 2.
    """
    if p < 2:
        raise ValueError("p should not be less than 2!")
    if p == 2:
        # M2 = 3 is prime; the recurrence below only applies for p > 2.
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
| 576 | 0 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    """Builds a tiny LED config and matching inputs for the tests below.

    NOTE(review): class name restored from the ``TFLEDModelTester(self)``
    call in the test class below; attribute/method names and the ``__a``
    placeholders restored from the surrounding data flow — confirm against
    the upstream LED test suite.
    """

    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        """Return a small (config, inputs_dict) pair for the common tests."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        # Mark the last position as globally attended, everything else local.
        global_attention_mask = tf.concat(
            [tf.zeros_like(inputs_dict["attention_mask"])[:, :-1], tf.ones_like(inputs_dict["attention_mask"])[:, -1:]], axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Check that cached decoding matches a full forward pass."""
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    """
    Build the keyword-argument dict LED models expect, deriving any mask
    that was not supplied from the token ids and ``config``.

    NOTE(review): name restored from the call sites in the tester and
    integration tests; the obfuscated signature repeated one parameter
    name four times (a SyntaxError) and assigned results to undefined
    placeholder names.
    """
    if attention_mask is None:
        # attend everywhere except padding
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # always attend to the first decoder token, mask decoder padding after it
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-API tests for the TF LED models.

    NOTE(review): base classes restored from the mixins imported at the top
    of the file; attribute names restored from the TFModelTesterMixin
    contract and test method names from the unittest ``test_*`` discovery
    requirement — the obfuscated originals all collided on one name.
    """

    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        # make the first two positions global
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        # NOTE(review): method name assumed from the upstream LED test suite —
        # confirm; the skip reason string is preserved verbatim.
        pass

    def test_generate_with_headmasking(self):
        # NOTE(review): name assumed from the upstream LED test suite — confirm.
        pass
def _long_tensor(tok_lst):
    """Build an integer constant tensor from a nested list of token ids.

    NOTE(review): name restored from the call sites in the integration
    tests below; the obfuscated body returned an undefined name, and the
    mangled dtype ``tf.intaa`` is assumed to be ``tf.int32`` — confirm
    against the upstream test file.
    """
    return tf.constant(tok_lst, dtype=tf.int32)
# NOTE(review): constant name assumed (it is not referenced below — the
# asserts use literal tolerances); kept for parity with the upstream file.
TOLERANCE = 1e-4


@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    """Slow integration checks against the pretrained allenai/led-base-16384
    checkpoint. NOTE(review): class/method/local names restored from the
    data flow; the obfuscated originals collided and referenced undefined
    ``__a`` placeholders.
    """

    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 707 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import structure: maps each submodule to the public names it provides.
# NOTE(review): the obfuscated source never defined `_import_structure`,
# clobbered the config dict with the model list, imported from mangled
# module names (`configuration_swinva`, `SwinvaConfig`) that contradict the
# declared strings, and never installed the _LazyModule into sys.modules.
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 458 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def get_new_h_w(h: int, w: int, scale_factor: int = 8) -> tuple[int, int]:
    """Round a latent size up to the model grid.

    Divides ``h`` and ``w`` by ``scale_factor**2`` (rounding up), then
    multiplies back by ``scale_factor`` — i.e. returns the smallest
    latent-space (height, width) covering the request.

    The original block was mangled: the ``def`` header repeated the same
    parameter name (a SyntaxError) and the body assigned a placeholder name
    while incrementing the undefined ``new_h``/``new_w``.  The name
    ``get_new_h_w`` is what the pipeline's ``__call__`` invokes.

    Args:
        h: requested pixel height.
        w: requested pixel width.
        scale_factor: MoVQ spatial scale factor (default 8).

    Returns:
        ``(new_h, new_w)`` rounded up to multiples of ``scale_factor``.
    """
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        # Partial tile at the bottom edge: round up.
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        # Partial tile at the right edge: round up.
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
class __snake_case( _lowerCAmelCase ):
    """Kandinsky text-to-image diffusion pipeline.

    Registers a text encoder, tokenizer, conditional UNet, scheduler and MoVQ
    image decoder, and runs the full classifier-free-guidance denoising loop
    in ``__call__``.

    NOTE(review): this block was mangled by an automated identifier rewrite —
    every local is assigned to the single name ``lowerCAmelCase`` and most
    parameters are ``A_`` (several ``def`` headers repeat ``A_``, which is a
    SyntaxError), while later statements still read the *original* names
    (``latents``, ``text_inputs``, ``prompt_embeds``, ...).  The decorator
    argument at ``__call__`` and the last line's trailing ``| 433 |`` are
    further residue.  Code is preserved byte-for-byte; the original names
    must be restored before this can run.
    """

    def __init__( self , A_ , A_ , A_ , A_ , A_ , ) -> Dict:
        # Registers the five sub-modules; the last line derives the MoVQ
        # spatial scale factor, 2**(num_blocks - 1), used to size latents.
        super().__init__()
        self.register_modules(
            text_encoder=A_ , tokenizer=A_ , unet=A_ , scheduler=A_ , movq=A_ , )
        lowerCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)

    def __snake_case ( self , A_ , A_ , A_ , A_ , A_ , A_ ) -> List[str]:
        # prepare_latents (called as ``self.prepare_latents`` in __call__):
        # draw fresh noise when none is supplied, otherwise validate and move
        # the provided latents, then scale by the scheduler's init_noise_sigma.
        if latents is None:
            lowerCAmelCase = randn_tensor(A_ , generator=A_ , device=A_ , dtype=A_ )
        else:
            if latents.shape != shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
            lowerCAmelCase = latents.to(A_ )
        lowerCAmelCase = latents * scheduler.init_noise_sigma
        return latents

    def __snake_case ( self , A_ , A_ , A_ , A_ , A_=None , ) -> Tuple:
        # _encode_prompt (called as ``self._encode_prompt`` in __call__):
        # tokenize the prompt (max length 77), warn about truncation, run the
        # text encoder, and duplicate embeddings per generated image.  Under
        # classifier-free guidance, a "negative" (unconditional) prompt is
        # encoded as well and concatenated in front of the positive one.
        lowerCAmelCase = len(A_ ) if isinstance(A_ , A_ ) else 1
        # get prompt text embeddings
        lowerCAmelCase = self.tokenizer(
            A_ , padding="""max_length""" , truncation=A_ , max_length=77 , return_attention_mask=A_ , add_special_tokens=A_ , return_tensors="""pt""" , )
        lowerCAmelCase = text_inputs.input_ids
        lowerCAmelCase = self.tokenizer(A_ , padding="""longest""" , return_tensors="""pt""" ).input_ids
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(A_ , A_ ):
            # Surface silently-truncated tail tokens to the user.
            lowerCAmelCase = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
        lowerCAmelCase = text_input_ids.to(A_ )
        lowerCAmelCase = text_inputs.attention_mask.to(A_ )
        lowerCAmelCase, lowerCAmelCase = self.text_encoder(
            input_ids=A_ , attention_mask=A_ )
        lowerCAmelCase = prompt_embeds.repeat_interleave(A_ , dim=0 )
        lowerCAmelCase = text_encoder_hidden_states.repeat_interleave(A_ , dim=0 )
        lowerCAmelCase = text_mask.repeat_interleave(A_ , dim=0 )
        if do_classifier_free_guidance:
            lowerCAmelCase = 42  # NOTE(review): placeholder left by the rewrite (was likely an annotation)
            if negative_prompt is None:
                lowerCAmelCase = [""""""] * batch_size
            elif type(A_ ) is not type(A_ ):
                raise TypeError(
                    f'`negative_prompt` should be the same type to `prompt`, but got {type(A_ )} !='
                    f' {type(A_ )}.' )
            elif isinstance(A_ , A_ ):
                lowerCAmelCase = [negative_prompt]
            elif batch_size != len(A_ ):
                raise ValueError(
                    f'`negative_prompt`: {negative_prompt} has batch size {len(A_ )}, but `prompt`:'
                    f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
                    """ the batch size of `prompt`.""" )
            else:
                lowerCAmelCase = negative_prompt
            lowerCAmelCase = self.tokenizer(
                A_ , padding="""max_length""" , max_length=77 , truncation=A_ , return_attention_mask=A_ , add_special_tokens=A_ , return_tensors="""pt""" , )
            lowerCAmelCase = uncond_input.input_ids.to(A_ )
            lowerCAmelCase = uncond_input.attention_mask.to(A_ )
            lowerCAmelCase, lowerCAmelCase = self.text_encoder(
                input_ids=A_ , attention_mask=A_ )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            lowerCAmelCase = negative_prompt_embeds.shape[1]
            lowerCAmelCase = negative_prompt_embeds.repeat(1 , A_ )
            lowerCAmelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A_ )
            lowerCAmelCase = uncond_text_encoder_hidden_states.shape[1]
            lowerCAmelCase = uncond_text_encoder_hidden_states.repeat(1 , A_ , 1 )
            lowerCAmelCase = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt , A_ , -1 )
            lowerCAmelCase = uncond_text_mask.repeat_interleave(A_ , dim=0 )
            # done duplicates
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            lowerCAmelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
            lowerCAmelCase = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
            lowerCAmelCase = torch.cat([uncond_text_mask, text_mask] )
        return prompt_embeds, text_encoder_hidden_states, text_mask

    def __snake_case ( self , A_=0 ) -> int:
        # Sequential CPU offload via accelerate's ``cpu_offload``: moves
        # unet/text_encoder/movq off-GPU, pulling them to cuda:<gpu_id> only
        # while executing.  (Presumably ``enable_sequential_cpu_offload`` —
        # confirm against upstream.)
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )
        lowerCAmelCase = torch.device(f'cuda:{gpu_id}' )
        lowerCAmelCase = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(A_ , A_ )

    def __snake_case ( self , A_=0 ) -> Tuple:
        # enable_model_cpu_offload (per its own error message): whole-model
        # offload with hooks; requires accelerate >= 0.17.0.dev0 and keeps a
        # reference to the final hook so it can be released manually.
        if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
        lowerCAmelCase = torch.device(f'cuda:{gpu_id}' )
        if self.device.type != "cpu":
            self.to("""cpu""" , silence_dtype_warnings=A_ )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        lowerCAmelCase = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            lowerCAmelCase, lowerCAmelCase = cpu_offload_with_hook(A_ , A_ , prev_module_hook=A_ )
        if self.safety_checker is not None:
            lowerCAmelCase, lowerCAmelCase = cpu_offload_with_hook(self.safety_checker , A_ , prev_module_hook=A_ )
        # We'll offload the last model manually.
        lowerCAmelCase = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def __snake_case ( self ) -> Any:
        # _execution_device (read as ``self._execution_device`` in __call__):
        # the device accelerate hooks will execute on; falls back to
        # ``self.device`` when no hook is installed.
        if not hasattr(self.unet , """_hf_hook""" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(A_ , """_hf_hook""" )
                and hasattr(module._hf_hook , """execution_device""" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    @replace_example_docstring(A_ )
    def __call__( self , A_ , A_ , A_ , A_ = None , A_ = 512 , A_ = 512 , A_ = 100 , A_ = 4.0 , A_ = 1 , A_ = None , A_ = None , A_ = "pil" , A_ = True , ) -> Optional[Any]:
        # Full generation loop: encode prompt, concatenate (negative, positive)
        # image embeddings, iterate the scheduler over UNet predictions with
        # classifier-free guidance, then decode latents with MoVQ and convert
        # to the requested output type ("pt", "np" or "pil").
        if isinstance(A_ , A_ ):
            lowerCAmelCase = 1
        elif isinstance(A_ , A_ ):
            lowerCAmelCase = len(A_ )
        else:
            raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(A_ )}' )
        lowerCAmelCase = self._execution_device
        lowerCAmelCase = batch_size * num_images_per_prompt
        lowerCAmelCase = guidance_scale > 1.0
        lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = self._encode_prompt(
            A_ , A_ , A_ , A_ , A_ )
        if isinstance(A_ , A_ ):
            lowerCAmelCase = torch.cat(A_ , dim=0 )
        if isinstance(A_ , A_ ):
            lowerCAmelCase = torch.cat(A_ , dim=0 )
        if do_classifier_free_guidance:
            lowerCAmelCase = image_embeds.repeat_interleave(A_ , dim=0 )
            lowerCAmelCase = negative_image_embeds.repeat_interleave(A_ , dim=0 )
            lowerCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
                dtype=prompt_embeds.dtype , device=A_ )
        self.scheduler.set_timesteps(A_ , device=A_ )
        lowerCAmelCase = self.scheduler.timesteps
        lowerCAmelCase = self.unet.config.in_channels
        lowerCAmelCase, lowerCAmelCase = get_new_h_w(A_ , A_ , self.movq_scale_factor )
        # create initial latent
        lowerCAmelCase = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , A_ , A_ , A_ , self.scheduler , )
        for i, t in enumerate(self.progress_bar(A_ ) ):
            # expand the latents if we are doing classifier free guidance
            lowerCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            lowerCAmelCase = {"""text_embeds""": prompt_embeds, """image_embeds""": image_embeds}
            lowerCAmelCase = self.unet(
                sample=A_ , timestep=A_ , encoder_hidden_states=A_ , added_cond_kwargs=A_ , return_dict=A_ , )[0]
            if do_classifier_free_guidance:
                # Split off the learned variance channels, apply guidance to
                # the noise prediction, then re-attach the text variance.
                lowerCAmelCase, lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
                lowerCAmelCase, lowerCAmelCase = noise_pred.chunk(2 )
                lowerCAmelCase, lowerCAmelCase = variance_pred.chunk(2 )
                lowerCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                lowerCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , """variance_type""" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                # Scheduler does not expect variance channels: drop them.
                lowerCAmelCase, lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            lowerCAmelCase = self.scheduler.step(
                A_ , A_ , A_ , generator=A_ , ).prev_sample
        # post-processing
        lowerCAmelCase = self.movq.decode(A_ , force_not_quantize=A_ )["""sample"""]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
        if output_type in ["np", "pil"]:
            # De-normalize from [-1, 1] to [0, 1] and move to NHWC numpy.
            lowerCAmelCase = image * 0.5 + 0.5
            lowerCAmelCase = image.clamp(0 , 1 )
            lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            lowerCAmelCase = self.numpy_to_pil(A_ )
        if not return_dict:
            return (image,)
        # NOTE(review): trailing " | 433 |" below is dataset-table residue
        # embedded in the source line — delete it when restoring this file.
        return ImagePipelineOutput(images=A_ ) | 433 |
'''simple docstring'''
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the 4-directional paths from ``(row, col)`` to the bottom-right
    cell of ``grid`` that avoid obstacles (cells equal to 1) and never revisit
    a cell.

    The original header repeated one parameter name four times (a
    SyntaxError) and the body assigned a placeholder while reading the
    undefined ``row_length``/``col_length``/``count``; the recursive calls
    already used the name ``depth_first_search``, which fixes the intended
    identifiers.

    Args:
        grid: rectangular matrix of 0 (free) / 1 (blocked) cells.
        row, col: current position.
        visit: set of ``(row, col)`` pairs on the current path; mutated
            during recursion but restored before returning.

    Returns:
        Number of simple paths reaching ``grid[-1][-1]``.
    """
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        # Out of bounds, already on the path, or blocked.
        return 0
    if row == row_length - 1 and col == col_length - 1:
        # Reached the target cell: one complete path.
        return 1

    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))  # backtrack so sibling branches may reuse the cell
    return count
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    # Fix: the last line carried trailing dataset-table residue
    # (" | 433 | 1 |") that made the statement a syntax error.
    import doctest

    doctest.testmod()
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class _snake_case ( _a ):
    """Processor for SAM: wraps a ``SamImageProcessor`` and rescales prompt
    points / labels / bounding boxes into the resized image frame before
    handing everything to the model.

    NOTE(review): this block was mangled by an automated rewrite — every local
    is assigned to the single name ``SCREAMING_SNAKE_CASE``, call arguments are
    the undefined name ``snake_case_``, and several ``def`` headers repeat the
    parameter ``a`` (a SyntaxError).  Later statements still read the original
    names (``original_sizes``, ``input_points``, ...).  Code preserved
    byte-for-byte; original identifiers must be restored before use.
    """

    # Declared attribute list / processor class consumed by ProcessorMixin.
    _lowercase : Optional[int] = ["""image_processor"""]
    _lowercase : List[Any] = """SamImageProcessor"""

    def __init__( self , a) -> Any:
        # Stores the image processor, the sentinel pad value used for ragged
        # point batches (-10, read back as ``self.point_pad_value``) and the
        # target long-edge size (read back as ``self.target_size``).
        super().__init__(snake_case_)
        SCREAMING_SNAKE_CASE = self.image_processor
        SCREAMING_SNAKE_CASE = -10
        SCREAMING_SNAKE_CASE = self.image_processor.size['longest_edge']

    def __call__( self , a=None , a=None , a=None , a=None , a = None , **a , ) -> BatchEncoding:
        # Runs the image processor, then validates (_check_and_preprocess_points)
        # and rescales (_normalize_and_convert) the point/label/box prompts to
        # the processed image size.
        SCREAMING_SNAKE_CASE = self.image_processor(
            snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
        # pop arguments that are not used in the foward but used nevertheless
        SCREAMING_SNAKE_CASE = encoding_image_processor['original_sizes']
        if hasattr(snake_case_ , 'numpy'):  # Checks if Torch or TF tensor
            SCREAMING_SNAKE_CASE = original_sizes.numpy()
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._check_and_preprocess_points(
            input_points=snake_case_ , input_labels=snake_case_ , input_boxes=snake_case_ , )
        SCREAMING_SNAKE_CASE = self._normalize_and_convert(
            snake_case_ , snake_case_ , input_points=snake_case_ , input_labels=snake_case_ , input_boxes=snake_case_ , return_tensors=snake_case_ , )
        return encoding_image_processor

    def SCREAMING_SNAKE_CASE__ ( self , a , a , a=None , a=None , a=None , a="pt" , ) -> Tuple:
        # _normalize_and_convert (called above): rescale coordinates to the
        # model frame, pad ragged point sets, convert to torch/tf tensors and
        # merge the results into the BatchEncoding.
        if input_points is not None:
            if len(snake_case_) != len(snake_case_):
                # Single original size shared by every point set.
                SCREAMING_SNAKE_CASE = [
                    self._normalize_coordinates(self.target_size , snake_case_ , original_sizes[0]) for point in input_points
                ]
            else:
                SCREAMING_SNAKE_CASE = [
                    self._normalize_coordinates(self.target_size , snake_case_ , snake_case_)
                    for point, original_size in zip(snake_case_ , snake_case_)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._pad_points_and_labels(snake_case_ , snake_case_)
            SCREAMING_SNAKE_CASE = np.array(snake_case_)
        if input_labels is not None:
            SCREAMING_SNAKE_CASE = np.array(snake_case_)
        if input_boxes is not None:
            if len(snake_case_) != len(snake_case_):
                SCREAMING_SNAKE_CASE = [
                    self._normalize_coordinates(self.target_size , snake_case_ , original_sizes[0] , is_bounding_box=snake_case_)
                    for box in input_boxes
                ]
            else:
                SCREAMING_SNAKE_CASE = [
                    self._normalize_coordinates(self.target_size , snake_case_ , snake_case_ , is_bounding_box=snake_case_)
                    for box, original_size in zip(snake_case_ , snake_case_)
                ]
            SCREAMING_SNAKE_CASE = np.array(snake_case_)
        if input_boxes is not None:
            if return_tensors == "pt":
                SCREAMING_SNAKE_CASE = torch.from_numpy(snake_case_)
                # boxes batch size of 1 by default
                SCREAMING_SNAKE_CASE = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                SCREAMING_SNAKE_CASE = tf.convert_to_tensor(snake_case_)
                # boxes batch size of 1 by default
                SCREAMING_SNAKE_CASE = tf.expand_dims(snake_case_ , 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({'input_boxes': input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                SCREAMING_SNAKE_CASE = torch.from_numpy(snake_case_)
                # point batch size of 1 by default
                SCREAMING_SNAKE_CASE = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                SCREAMING_SNAKE_CASE = tf.convert_to_tensor(snake_case_)
                # point batch size of 1 by default
                SCREAMING_SNAKE_CASE = tf.expand_dims(snake_case_ , 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({'input_points': input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                SCREAMING_SNAKE_CASE = torch.from_numpy(snake_case_)
                # point batch size of 1 by default
                SCREAMING_SNAKE_CASE = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                SCREAMING_SNAKE_CASE = tf.convert_to_tensor(snake_case_)
                # point batch size of 1 by default
                SCREAMING_SNAKE_CASE = tf.expand_dims(snake_case_ , 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({'input_labels': input_labels})
        return encoding_image_processor

    def SCREAMING_SNAKE_CASE__ ( self , a , a) -> List[str]:
        # _pad_points_and_labels (called above): pad every point set to the
        # longest one with the sentinel pad value; extend labels accordingly.
        SCREAMING_SNAKE_CASE = max([point.shape[0] for point in input_points])
        SCREAMING_SNAKE_CASE = []
        for i, point in enumerate(snake_case_):
            if point.shape[0] != expected_nb_points:
                SCREAMING_SNAKE_CASE = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value] , axis=0)
                SCREAMING_SNAKE_CASE = np.append(input_labels[i] , [self.point_pad_value])
            processed_input_points.append(snake_case_)
        SCREAMING_SNAKE_CASE = processed_input_points
        return input_points, input_labels

    def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a=False) -> np.ndarray:
        # _normalize_coordinates (called above): map (x, y) pairs from the
        # original image size to the resized model frame; bounding boxes are
        # reshaped to (N, 2, 2) corner pairs first, then back to (N, 4).
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = original_size
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor._get_preprocess_shape(snake_case_ , longest_edge=snake_case_)
        SCREAMING_SNAKE_CASE = deepcopy(snake_case_).astype(snake_case_)
        if is_bounding_box:
            SCREAMING_SNAKE_CASE = coords.reshape(-1 , 2 , 2)
        SCREAMING_SNAKE_CASE = coords[..., 0] * (new_w / old_w)
        SCREAMING_SNAKE_CASE = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            SCREAMING_SNAKE_CASE = coords.reshape(-1 , 4)
        return coords

    def SCREAMING_SNAKE_CASE__ ( self , a=None , a=None , a=None , ) -> Union[str, Any]:
        # _check_and_preprocess_points (called in __call__): validate that
        # prompts are lists of lists and convert tensors/lists to numpy.
        if input_points is not None:
            if hasattr(snake_case_ , 'numpy'):  # Checks for TF or Torch tensor
                SCREAMING_SNAKE_CASE = input_points.numpy().tolist()
            if not isinstance(snake_case_ , snake_case_) or not isinstance(input_points[0] , snake_case_):
                raise ValueError('Input points must be a list of list of floating points.')
            SCREAMING_SNAKE_CASE = [np.array(snake_case_) for input_point in input_points]
        else:
            SCREAMING_SNAKE_CASE = None
        if input_labels is not None:
            if hasattr(snake_case_ , 'numpy'):
                SCREAMING_SNAKE_CASE = input_labels.numpy().tolist()
            if not isinstance(snake_case_ , snake_case_) or not isinstance(input_labels[0] , snake_case_):
                raise ValueError('Input labels must be a list of list integers.')
            SCREAMING_SNAKE_CASE = [np.array(snake_case_) for label in input_labels]
        else:
            SCREAMING_SNAKE_CASE = None
        if input_boxes is not None:
            if hasattr(snake_case_ , 'numpy'):
                SCREAMING_SNAKE_CASE = input_boxes.numpy().tolist()
            if (
                not isinstance(snake_case_ , snake_case_)
                or not isinstance(input_boxes[0] , snake_case_)
                or not isinstance(input_boxes[0][0] , snake_case_)
            ):
                raise ValueError('Input boxes must be a list of list of list of floating points.')
            SCREAMING_SNAKE_CASE = [np.array(snake_case_).astype(np.floataa) for box in input_boxes]
        else:
            SCREAMING_SNAKE_CASE = None
        return input_points, input_labels, input_boxes

    @property
    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
        # De-duplicated input names of the wrapped image processor.
        SCREAMING_SNAKE_CASE = self.image_processor.model_input_names
        return list(dict.fromkeys(snake_case_))

    def SCREAMING_SNAKE_CASE__ ( self , *a , **a) -> Dict:
        # Delegate mask post-processing to the image processor.
        return self.image_processor.post_process_masks(*snake_case_ , **snake_case_)
| 720 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
a_ : Optional[int] = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
a_ : Tuple = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
a_ : Optional[Any] = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    """Element-wise accuracy of *preds* against *labels* (numpy arrays).

    Restored name: the rewrite had collapsed this function and its two
    siblings onto one identifier, while this file calls ``simple_accuracy``
    from both ``acc_and_fa`` and the metric's compute dispatch.
    """
    return float((preds == labels).mean())
def acc_and_fa(preds, labels):
    """Return accuracy and F1 for *preds* vs *labels*.

    Restored name: the metric's compute dispatch calls ``acc_and_fa`` for the
    "wiki-ner" subset.  ``fa_score`` is this file's (mangled) import of
    sklearn's f1 score; the ground truth is passed as ``y_true`` and the
    predictions as ``y_pred`` — the conventional sklearn argument mapping.
    """
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def precision_at_aa(en_sentvecs, in_sentvecs):
    """Precision@10 for cross-lingual sentence retrieval.

    For each English sentence vector, ranks all Indic sentence vectors by
    cosine distance (after mean-centering both sides) and scores a hit when
    the aligned index appears among the 10 nearest neighbours.

    Restored name: the metric's compute dispatch calls ``precision_at_aa``
    for the "cvit-mkb-clsr" subset.

    Returns:
        Fraction of rows whose true counterpart is in the top 10, as float.
    """
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]  # indices of the 10 closest columns
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
    """IndicGLUE evaluation metric.

    Dispatches on ``self.config_name``: plain accuracy for the classification
    subsets, accuracy + F1 for "wiki-ner", and precision@10 for the
    "cvit-mkb-clsr" sentence-retrieval subset.
    """

    # NOTE: ``datasets.Metric`` requires the hook names ``_info`` and
    # ``_compute``.  The automated rewrite had collapsed both methods onto one
    # duplicated name with duplicate (syntactically invalid) parameter names;
    # they are restored here.

    def _info(self):
        """Declare the feature schema for the selected IndicGLUE subset."""
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]')
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # cvit-mkb-clsr compares sentence vectors (float32
                    # sequences); every other subset uses int64 labels.
                    "predictions": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                    "references": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if self.config_name != "cvit-mkb-clsr" else None,
        )

    def _compute(self, predictions, references):
        """Score *predictions* against *references* for the active subset."""
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_aa(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_fa(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]')
| 444 | 0 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# Best-effort probe for Google Colab: defaults to False and stays False when
# the probe helper raises ModuleNotFoundError.
# NOTE(review): the rewrite renamed both assignment targets to `__snake_case`,
# while the menu class below reads `in_colab` — restore the original name.
__snake_case : List[Any] = False
try:
    __snake_case : Tuple = _is_package_available('''google.colab''')
except ModuleNotFoundError:
    pass
@input.register
class lowercase_ :
    """Interactive terminal bullet menu: renders ``choices`` and lets the user
    pick one with the arrow keys, the digit keys, Enter, or (on Colab) a typed
    index read from stdin.

    NOTE(review): this block was mangled by an automated rewrite — locals are
    all ``UpperCAmelCase_`` and several expressions reference undefined names
    (``choices``/``prompt`` in ``__init__``, ``in_colab``, ``UpperCamelCase__``
    inside the class-scope decorator, ``self.current_selection``).  The
    ``__init__`` header also repeats its parameter name (a SyntaxError) and
    uses a mutable ``[]`` default.  Code preserved byte-for-byte.
    """

    def __init__( self , UpperCamelCase__ = None , UpperCamelCase__ = [] ) -> int:
        """Store the prompt and choices; pick the platform's arrow glyph."""
        UpperCAmelCase_ = 0
        UpperCAmelCase_ = choices
        UpperCAmelCase_ = prompt
        if sys.platform == "win32":
            UpperCAmelCase_ = "*"
        else:
            UpperCAmelCase_ = "➔ "

    def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = "" ) -> List[str]:
        """write_choice (called from the Enter handler): print the choice at
        *index*, colorized everywhere except Windows."""
        if sys.platform != "win32":
            writeColor(self.choices[index] , 3_2 , UpperCamelCase__ )
        else:
            forceWrite(self.choices[index] , UpperCamelCase__ )

    def lowerCamelCase_ ( self , UpperCamelCase__ ) -> int:
        """print_choice: render one menu row, prefixing the arrow glyph on the
        currently highlighted row."""
        if index == self.position:
            forceWrite(F""" {self.arrow_char} """ )
            self.write_choice(UpperCamelCase__ )
        else:
            forceWrite(F""" {self.choices[index]}""" )
        reset_cursor()

    def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = 1 ) -> List[Any]:
        """move_direction: move the highlight up/down by *num_spaces*, clamped
        to the list bounds, redrawing the old and new rows."""
        UpperCAmelCase_ = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(UpperCamelCase__ )
        move_cursor(UpperCamelCase__ , direction.name )
        self.print_choice(self.position )

    @input.mark(KEYMAP["up"] )
    def lowerCamelCase_ ( self ) -> Dict:
        """Arrow-up handler."""
        self.move_direction(Direction.UP )

    @input.mark(KEYMAP["down"] )
    def lowerCamelCase_ ( self ) -> Union[str, Any]:
        """Arrow-down handler."""
        self.move_direction(Direction.DOWN )

    @input.mark(KEYMAP["newline"] )
    def lowerCamelCase_ ( self ) -> Tuple:
        """Enter: move the cursor below the menu and accept the selection."""
        move_cursor(len(self.choices ) - self.position , "DOWN" )
        return self.position

    @input.mark(KEYMAP["interrupt"] )
    def lowerCamelCase_ ( self ) -> Tuple:
        """Ctrl-C: leave the menu area and propagate KeyboardInterrupt."""
        move_cursor(len(self.choices ) - self.position , "DOWN" )
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(UpperCamelCase__ )] for number in range(1_0 )] )
    def lowerCamelCase_ ( self ) -> str:
        """Digit keys 0-9: jump the highlight straight to the typed index."""
        UpperCAmelCase_ = int(chr(self.current_selection ) )
        UpperCAmelCase_ = index - self.position
        if index == self.position:
            return
        if index < len(self.choices ):
            if self.position > index:
                self.move_direction(Direction.UP , -movement )
            elif self.position < index:
                self.move_direction(Direction.DOWN , UpperCamelCase__ )
            else:
                return
        else:
            return

    def lowerCamelCase_ ( self , UpperCamelCase__ = 0 ) -> Optional[int]:
        """run: draw the menu, loop on input until a choice is made, clear the
        menu, echo the chosen entry and return its index."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt , "\n" )
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
        UpperCAmelCase_ = default_choice
        for i in range(len(self.choices ) ):
            self.print_choice(UpperCamelCase__ )
            forceWrite("\n" )
        move_cursor(len(self.choices ) - self.position , "UP" )
        with cursor.hide():
            while True:
                if in_colab:
                    # No raw keyboard access on Colab: read a typed index.
                    try:
                        UpperCAmelCase_ = int(builtins.input() )
                    except ValueError:
                        UpperCAmelCase_ = default_choice
                else:
                    UpperCAmelCase_ = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    # Erase the rendered menu before echoing the result.
                    for _ in range(len(self.choices ) + 1 ):
                        move_cursor(1 , "UP" )
                        clear_line()
                    self.write_choice(UpperCamelCase__ , "\n" )
                    return choice
| 660 | '''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc, x_start: float, x_end: float, steps: int = 100) -> float:
    """Approximate the arc length of ``fnc`` over ``[x_start, x_end]``.

    The curve is approximated by ``steps`` straight segments whose lengths
    are accumulated with ``math.hypot``.

    Restored identifiers: the original header repeated the parameter name
    ``A_`` (a SyntaxError) and the body mixed placeholder assignments with
    the real names; the ``__main__`` demo below calls ``line_length``.

    Args:
        fnc: function of one numeric argument.
        x_start: left end of the interval.
        x_end: right end of the interval.
        steps: number of linear segments (more steps, better estimate).

    Returns:
        Estimated curve length as a float.
    """
    x_prev = x_start
    f_prev = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x_next = (x_end - x_start) / steps + x_prev
        f_next = fnc(x_next)
        length += math.hypot(x_next - x_prev, f_next - f_prev)
        # Increment step
        x_prev = x_next
        f_prev = f_next
    return length
if __name__ == "__main__":
    # Demo: estimate the arc length of sin(10x) on [-10, 10] with an
    # increasing number of segments.  Restored the names ``f`` and ``i``,
    # which the mangled version assigned to placeholders while still reading
    # them in the loop and print call below.
    def f(x):
        """Demo curve f(x) = sin(10 * x)."""
        return math.sin(10 * x)

    print('''f(x) = sin(10 * x)''')
    print('''The length of the curve from x = -10 to x = 10 is:''')
    i = 10
    while i <= 10_00_00:
        print(F'''With {i} steps: {line_length(f, -10, 10, i)}''')
        i *= 10
| 660 | 1 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger.
lowerCAmelCase_ = logging.get_logger(__name__)
# Canonical checkpoint -> hosted config URL.
# NOTE(review): the rewrite reuses `lowerCAmelCase_` for both the logger and
# this map, so the second assignment clobbers the first — restore the
# original names (logger / pretrained-config archive map).
lowerCAmelCase_ = {
    '''microsoft/conditional-detr-resnet-50''': (
        '''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
    ),
}
class snake_case_ ( snake_case__ ):
    """Configuration for the Conditional DETR object-detection model: backbone
    selection, transformer encoder/decoder sizes, Hungarian-matcher costs and
    loss coefficients.

    NOTE(review): this block was mangled by an automated rewrite — every
    ``__init__`` parameter is named ``UpperCamelCase`` (duplicate parameter
    names are a SyntaxError) while the body reads the original names
    (``backbone_config``, ``num_queries``, ...), and every local/attribute
    assignment targets ``lowerCamelCase__``.  Code preserved byte-for-byte.
    """

    # Model-type tag and keys consumed by PretrainedConfig machinery.
    __lowerCAmelCase : str ='''conditional_detr'''
    __lowerCAmelCase : int =['''past_key_values''']
    __lowerCAmelCase : Tuple ={
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }

    def __init__( self , UpperCamelCase=True , UpperCamelCase=None , UpperCamelCase=3 , UpperCamelCase=3_00 , UpperCamelCase=6 , UpperCamelCase=20_48 , UpperCamelCase=8 , UpperCamelCase=6 , UpperCamelCase=20_48 , UpperCamelCase=8 , UpperCamelCase=0.0 , UpperCamelCase=0.0 , UpperCamelCase=True , UpperCamelCase="relu" , UpperCamelCase=2_56 , UpperCamelCase=0.1 , UpperCamelCase=0.0 , UpperCamelCase=0.0 , UpperCamelCase=0.0_2 , UpperCamelCase=1.0 , UpperCamelCase=False , UpperCamelCase="sine" , UpperCamelCase="resnet50" , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase=2 , UpperCamelCase=5 , UpperCamelCase=2 , UpperCamelCase=1 , UpperCamelCase=1 , UpperCamelCase=2 , UpperCamelCase=5 , UpperCamelCase=2 , UpperCamelCase=0.2_5 , **UpperCamelCase , ):
        # Resolve the backbone configuration (timm vs transformers ResNet
        # default), then store every architecture / matcher / loss
        # hyper-parameter on the instance.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can\'t specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                lowerCamelCase__ = CONFIG_MAPPING['resnet'](out_features=["stage4"])
            elif isinstance(_A , _A):
                # Config passed as a dict: look up its class and rebuild it.
                lowerCamelCase__ = backbone_config.get("model_type")
                lowerCamelCase__ = CONFIG_MAPPING[backbone_model_type]
                lowerCamelCase__ = config_class.from_dict(_A)
        lowerCamelCase__ = use_timm_backbone
        lowerCamelCase__ = backbone_config
        lowerCamelCase__ = num_channels
        lowerCamelCase__ = num_queries
        lowerCamelCase__ = d_model
        lowerCamelCase__ = encoder_ffn_dim
        lowerCamelCase__ = encoder_layers
        lowerCamelCase__ = encoder_attention_heads
        lowerCamelCase__ = decoder_ffn_dim
        lowerCamelCase__ = decoder_layers
        lowerCamelCase__ = decoder_attention_heads
        lowerCamelCase__ = dropout
        lowerCamelCase__ = attention_dropout
        lowerCamelCase__ = activation_dropout
        lowerCamelCase__ = activation_function
        lowerCamelCase__ = init_std
        lowerCamelCase__ = init_xavier_std
        lowerCamelCase__ = encoder_layerdrop
        lowerCamelCase__ = decoder_layerdrop
        lowerCamelCase__ = encoder_layers
        lowerCamelCase__ = auxiliary_loss
        lowerCamelCase__ = position_embedding_type
        lowerCamelCase__ = backbone
        lowerCamelCase__ = use_pretrained_backbone
        lowerCamelCase__ = dilation
        # Hungarian matcher
        lowerCamelCase__ = class_cost
        lowerCamelCase__ = bbox_cost
        lowerCamelCase__ = giou_cost
        # Loss coefficients
        lowerCamelCase__ = mask_loss_coefficient
        lowerCamelCase__ = dice_loss_coefficient
        lowerCamelCase__ = cls_loss_coefficient
        lowerCamelCase__ = bbox_loss_coefficient
        lowerCamelCase__ = giou_loss_coefficient
        lowerCamelCase__ = focal_alpha
        super().__init__(is_encoder_decoder=_A , **_A)

    @property
    def __UpperCAmelCase ( self):
        # Alias property returning the encoder head count.
        return self.encoder_attention_heads

    @property
    def __UpperCAmelCase ( self):
        # Alias property returning the model width.
        return self.d_model

    def __UpperCAmelCase ( self):
        # Serialize the config, nesting the backbone config and stamping the
        # class model_type.  NOTE(review): assignments target the placeholder
        # name while the return reads `output` — restore the original names.
        lowerCamelCase__ = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            lowerCamelCase__ = self.backbone_config.to_dict()
        lowerCamelCase__ = self.__class__.model_type
        return output
class snake_case_ ( snake_case__ ):
    """ONNX export configuration for Conditional DETR.

    NOTE(review): the rewrite gave all three members the same name
    ``__UpperCAmelCase``, so each definition overwrites the previous in the
    class namespace; upstream these look like the standard OnnxConfig hooks
    (inputs / validation tolerance / default opset) — confirm before
    restoring.  This class also shadows the config class above, which the
    rewrite gave the same name.
    """

    # Minimum transformers/ONNX feature version for this export config.
    __lowerCAmelCase : List[str] =version.parse('''1.11''' )

    @property
    def __UpperCAmelCase ( self):
        # Dynamic-axis spec: batch/channels/height/width for pixel_values,
        # batch for pixel_mask.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ])

    @property
    def __UpperCAmelCase ( self):
        # Absolute tolerance used when validating exported model outputs.
        return 1E-5

    @property
    def __UpperCAmelCase ( self):
        # ONNX opset number.
        return 12
| 711 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list) -> None:
    """Print every subsequence of *sequence*, one per line.

    Restored names: the rewrite had given both functions here the same
    identifier (the second definition shadowed the first) while the bodies
    and the ``__main__`` demo still call ``create_state_space_tree`` and
    ``generate_all_subsequences``.
    """
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list, current_subsequence: list, index: int) -> None:
    """Recursively enumerate subsequences of *sequence*.

    At each *index* the element is first excluded, then included; when the
    end of the sequence is reached the accumulated subsequence is printed.
    ``current_subsequence`` is mutated in place and restored on backtrack.
    """
    if index == len(sequence):
        print(current_subsequence)
        return
    # Branch 1: skip sequence[index].
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # Branch 2: take sequence[index], then backtrack.
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    # Demo: print all subsequences of an int list, then of a string list.
    # Restored the name ``seq``, which the mangled version assigned to a
    # placeholder while still reading ``seq`` in every statement below.
    seq = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
| 426 | 0 |
"""simple docstring"""
def _UpperCamelCase ( UpperCamelCase ) -> list:
"""simple docstring"""
__UpperCAmelCase : List[Any] = len(UpperCamelCase )
for _ in range(UpperCamelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
A = list(range(10, 0, -1))
print(f'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
| 77 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SCREAMING_SNAKE_CASE(PipelineTool):
    """Speech-to-text agent tool backed by Whisper.

    Wraps a ``WhisperProcessor``/``WhisperForConditionalGeneration`` pair behind
    the ``PipelineTool`` encode/forward/decode interface.
    """

    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        """Turn raw audio into Whisper input features (PyTorch tensors)."""
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        """Run autoregressive generation on the encoded features."""
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        """Decode the generated token ids into a single transcription string."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
| 466 | 0 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
# Parsed version of the installed torch package, used by `is_torch_version`.
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a library's installed version (or a given Version) against a requirement.

    Args:
        library_or_version: a package name (its installed version is looked up) or a `Version`.
        operation: a comparison key present in `STR_OPERATION_TO_FUNC` (e.g. ">=", "==").
        requirement_version: the version string to compare against.

    Raises:
        ValueError: if *operation* is not a supported comparison.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Compare the installed torch version against *version* using *operation*."""
    return compare_versions(torch_version, operation, version)
| 672 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger used by the conversion helpers below.
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
    )

# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
        ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
        ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
        ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
        ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
        ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
        ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
        ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
        ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
        ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
    ]
)
def rename_key(state_dict, old, new):
    """Move ``state_dict[old]`` to ``state_dict[new]`` in place (value unchanged)."""
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Return a copy of *state_dict* with timm-style backbone keys renamed.

    Keys containing ``backbone.0.body`` are remapped to the HF
    ``backbone.conv_encoder.model`` namespace; all other keys are kept as-is,
    preserving the original insertion order.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split fused encoder q/k/v projections into separate HF-style entries (in place).

    PyTorch's MultiHeadAttention stores query/key/value as one stacked
    ``in_proj`` matrix + bias; HF models keep three separate projections.
    The hidden size is 256, so rows [0:256] are q, [256:512] are k and the
    last 256 rows are v.
    """
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    """Download and return the standard COCO cats test image used for verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """Convert an original Conditional DETR torch-hub checkpoint to the HF format.

    Loads the original model, remaps its state dict to the HF layout, verifies
    the converted model's outputs against the original, and saves model +
    image processor to *pytorch_dump_folder_path*.
    """
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    image_format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=image_format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # NOTE(review): strip the leading "conditional_detr" prefix (16 chars) so the key
                # becomes "conditional_detr.model.<rest>" — TODO confirm against the upstream script.
                state_dict["conditional_detr.model" + key[len("conditional_detr"):]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
a : Optional[Any] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 672 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
# Force deterministic kernels globally so the hard-coded image-slice expectations below are reproducible.
enable_full_determinism()
class UpperCAmelCase__(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast (CPU-sized) tests for ``CycleDiffusionPipeline``."""

    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build tiny randomly-initialized pipeline components (seeded for determinism)."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Build a deterministic set of call kwargs for the pipeline on *device*."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    # The remaining overrides delegate to the mixin but skip on platforms/pipelines
    # where the shared test is known to be flaky or unsupported.
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests comparing pipeline output against reference images."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A black colored car"
        source_prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A black colored car"
        source_prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
import os
# Roman-numeral symbol values, shared by the parse/generate helpers below.
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a (possibly non-minimal) Roman-numeral string to an integer.

    A symbol smaller than its right neighbour is subtracted (e.g. the I in IV),
    otherwise it is added.
    """
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value
def generate_roman_numerals(num: int) -> str:
    """Render *num* as a minimal-form Roman-numeral string."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals
def UpperCamelCase__ ( UpperCAmelCase_ = "/p089_roman.txt" ) -> int:
'''simple docstring'''
_lowercase : List[str] = 0
with open(os.path.dirname(UpperCAmelCase_ ) + roman_numerals_filename ) as filea:
_lowercase : Optional[Any] = filea.readlines()
for line in lines:
_lowercase : int = line.strip()
_lowercase : Dict = parse_roman_numerals(UpperCAmelCase_ )
_lowercase : Optional[Any] = generate_roman_numerals(UpperCAmelCase_ )
savings += len(UpperCAmelCase_ ) - len(UpperCAmelCase_ )
return savings
if __name__ == "__main__":
print(F"""{solution() = }""") | 322 | 1 |
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
_UpperCamelCase = """path-to-your-trained-model"""
_UpperCamelCase = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""")
_UpperCamelCase = """A photo of sks dog in a bucket"""
_UpperCamelCase = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
| 708 |
"""simple docstring"""
from math import sqrt
def _a ( _snake_case = 100_0000 ):
"""simple docstring"""
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_snake_case , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""")
| 74 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
# Module-level logger for this tokenizer module.
logger = logging.get_logger(__name__)
# File names the tokenizer expects in a saved directory.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Hub download URLs for each published DistilBERT checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-german-cased": (
            "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
        ),
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum sequence length supported by each checkpoint's position embeddings.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "distilbert-base-uncased": 512,
    "distilbert-base-uncased-distilled-squad": 512,
    "distilbert-base-cased": 512,
    "distilbert-base-cased-distilled-squad": 512,
    "distilbert-base-german-cased": 512,
    "distilbert-base-multilingual-cased": 512,
}

# Per-checkpoint constructor defaults (casing behavior).
PRETRAINED_INIT_CONFIGURATION = {
    "distilbert-base-uncased": {"do_lower_case": True},
    "distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
    "distilbert-base-cased": {"do_lower_case": False},
    "distilbert-base-cased-distilled-squad": {"do_lower_case": False},
    "distilbert-base-german-cased": {"do_lower_case": False},
    "distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" DistilBERT tokenizer backed by the `tokenizers` library.

    NOTE(review): the original block named the class `A` with the undefined
    base `UpperCAmelCase__`, and gave every constructor parameter the same
    obfuscated name (a SyntaxError); the conventional names are restored.
    `PreTrainedTokenizerFast` and `normalizers` (from `tokenizers`) are
    assumed to be imported at the top of the file — confirm.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Keep the Rust backend's normalizer in sync with the requested options;
        # a tokenizer.json loaded from disk may have been saved with different ones.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs: `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer's model files to *save_directory*."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 15 |
from __future__ import annotations
def UpperCamelCase(__magic_name__: list[int]) -> list[int]:  # This function is recursive
    """Return the longest non-decreasing subsequence of the input list.

    Classic recursive formulation: either the first element (the pivot) is
    part of the answer, or the answer starts at some later element that is
    smaller than the pivot.

    NOTE(review): the original body assigned every local to the same mangled
    name and recursed through the undefined identifier
    ``longest_subsequence``, so it raised NameError on any non-trivial input;
    the intended locals and the self-recursion are restored here.
    """
    array = __magic_name__
    array_length = len(array)
    # If the array contains zero or one element it is its own answer
    # (the recursion's stop condition).
    if array_length <= 1:
        return array

    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            # The pivot cannot precede array[i]; try the best subsequence
            # drawn from the tail starting at i.
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = UpperCamelCase(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    # Alternative: keep the pivot and extend with later elements that do not
    # fall below it.
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *UpperCamelCase(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 15 | 1 |
'''simple docstring'''
from math import factorial, radians
def UpperCAmelCase_(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin(angle_in_degrees) with its Maclaurin series.

    :param angle_in_degrees: angle in degrees (any real value; reduced mod 360)
    :param accuracy: number of series terms added after the leading term
    :param rounded_values_count: number of decimal places kept in the result
    :return: sin of the angle, rounded to *rounded_values_count* places

    NOTE(review): the original signature repeated one obfuscated parameter
    name three times (a SyntaxError) and the body read undefined locals;
    conventional names are restored.
    """
    # Reduce the angle so the series is evaluated on a small argument.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    __import__('''doctest''').testmod()
| 41 |
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
# DreamBooth inference: load a fine-tuned Stable Diffusion checkpoint and
# sample one image for the personalization prompt.
# NOTE(review): the original bound every value to the same throwaway name
# (`lowercase`) and then read the undefined names `model_id`/`prompt`, and
# requested the nonexistent dtype `torch.floataa`; both are fixed here.
model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('''cuda''')

prompt = '''A photo of sks dog in a bucket'''
# 50 denoising steps / guidance 7.5 are the stock Stable Diffusion settings.
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save('''dog-bucket.png''')
| 41 | 1 |
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    """Reset the set of already-emitted deprecation warnings so each test re-warns."""
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


@pytest.fixture
def mock_hfh(monkeypatch):
    """Replace the Hub client used by `datasets.inspect` with a canned metric list."""

    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


# NOTE(review): the original gave all three functions one shared obfuscated
# name and the test five identically-named parameters (a SyntaxError); the
# conventional fixture/test names are restored so pytest can wire them up.
@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    """Every metric entry point should emit the evaluate-migration FutureWarning."""
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 620 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
# Force deterministic torch/cuDNN behavior so the pixel-level comparisons
# in the tests below are reproducible.
enable_full_determinism()
def check_same_shape(tensor_list):
    """Return True if every tensor in *tensor_list* has the same shape.

    An empty or single-element list is trivially uniform.

    NOTE(review): the original bound the shape list to a mangled name and then
    read the undefined name `shapes`, and was itself defined under a name
    different from the one its in-file caller uses (`check_same_shape`);
    both are fixed here.
    """
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class _UpperCAmelCase ( snake_case , snake_case , snake_case , unittest.TestCase ):
    """Fast tests for StableDiffusionLatentUpscalePipeline on tiny components.

    NOTE(review): this block is name-mangled — every class attribute is bound
    to the same name ``__lowerCamelCase`` and every method to
    ``lowerCAmelCase__``, so each later definition shadows the previous one,
    the base classes are the undefined name ``snake_case``, and method bodies
    read identifiers (``batch_size``, ``model``, ``pipe`` …) that are never
    defined.  The code is left byte-identical here; restoring the original
    attribute/method names requires the upstream file as reference.
    """
    __lowerCamelCase: Union[str, Any] = StableDiffusionLatentUpscalePipeline
    __lowerCamelCase: List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        'height',
        'width',
        'cross_attention_kwargs',
        'negative_prompt_embeds',
        'prompt_embeds',
    }
    __lowerCamelCase: List[Any] = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
    __lowerCamelCase: List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    __lowerCamelCase: Union[str, Any] = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    __lowerCamelCase: List[Any] = frozenset([] )
    __lowerCamelCase: Dict = True
    @property
    def lowerCAmelCase__ ( self : List[Any] ):
        '''Random 1x4x16x16 float tensor used as the pipeline's low-resolution input.'''
        lowercase_ : Optional[int] = 1
        lowercase_ : Any = 4
        lowercase_ : int = (1_6, 1_6)
        lowercase_ : List[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(a )
        return image
    def lowerCAmelCase__ ( self : Optional[Any] ):
        '''Build tiny UNet/VAE/scheduler/CLIP components so the pipeline runs fast.'''
        torch.manual_seed(0 )
        lowercase_ : int = UNetaDConditionModel(
            act_fn="gelu" , attention_head_dim=8 , norm_num_groups=a , block_out_channels=[3_2, 3_2, 6_4, 6_4] , time_cond_proj_dim=1_6_0 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=3_2 , down_block_types=(
            "KDownBlock2D",
            "KCrossAttnDownBlock2D",
            "KCrossAttnDownBlock2D",
            "KCrossAttnDownBlock2D",
        ) , in_channels=8 , mid_block_type=a , only_cross_attention=a , out_channels=5 , resnet_time_scale_shift="scale_shift" , time_embedding_type="fourier" , timestep_post_act="gelu" , up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D") , )
        lowercase_ : List[str] = AutoencoderKL(
            block_out_channels=[3_2, 3_2, 6_4, 6_4] , in_channels=3 , out_channels=3 , down_block_types=[
            "DownEncoderBlock2D",
            "DownEncoderBlock2D",
            "DownEncoderBlock2D",
            "DownEncoderBlock2D",
        ] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        lowercase_ : Optional[int] = EulerDiscreteScheduler(prediction_type="sample" )
        lowercase_ : List[str] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="quick_gelu" , projection_dim=5_1_2 , )
        lowercase_ : Optional[int] = CLIPTextModel(a )
        lowercase_ : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        lowercase_ : int = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def lowerCAmelCase__ ( self : List[str] , a : int , a : List[str]=0 ):
        '''Minimal pipeline kwargs: prompt, dummy image, seeded generator, 2 steps.'''
        if str(a ).startswith("mps" ):
            lowercase_ : Optional[int] = torch.manual_seed(a )
        else:
            lowercase_ : Optional[int] = torch.Generator(device=a ).manual_seed(a )
        lowercase_ : Optional[Any] = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def lowerCAmelCase__ ( self : List[str] ):
        '''Smoke test: run the pipeline for 2 steps on CPU and check a 3x3 output slice.'''
        lowercase_ : Optional[int] = "cpu"
        lowercase_ : Any = self.get_dummy_components()
        lowercase_ : Tuple = self.pipeline_class(**a )
        pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        lowercase_ : List[Any] = self.get_dummy_inputs(a )
        lowercase_ : List[str] = pipe(**a ).images
        lowercase_ : int = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 2_5_6, 2_5_6, 3) )
        lowercase_ : int = np.array(
            [0.4722_2412, 0.4192_1633, 0.4471_7434, 0.4687_4192, 0.4258_8258, 0.4615_0726, 0.467_7534, 0.4558_3832, 0.4857_9055] )
        lowercase_ : int = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(a , 1e-3 )
    def lowerCAmelCase__ ( self : int ):
        '''Mixin override with a looser numerical tolerance for this pipeline.'''
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
    def lowerCAmelCase__ ( self : List[Any] ):
        '''Mixin override with a looser numerical tolerance for this pipeline.'''
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
    def lowerCAmelCase__ ( self : List[str] ):
        '''Mixin override with a looser numerical tolerance for this pipeline.'''
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
    def lowerCAmelCase__ ( self : Dict ):
        '''Mixin override with a looser numerical tolerance for this pipeline.'''
        super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
    def lowerCAmelCase__ ( self : Union[str, Any] ):
        '''Mixin override with a looser numerical tolerance for this pipeline.'''
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
    def lowerCAmelCase__ ( self : Tuple ):
        '''Mixin override with a looser numerical tolerance for this pipeline.'''
        super().test_save_load_local(expected_max_difference=3e-3 )
    def lowerCAmelCase__ ( self : Any ):
        '''Mixin override with a looser numerical tolerance for this pipeline.'''
        super().test_save_load_optional_components(expected_max_difference=3e-3 )
    def lowerCAmelCase__ ( self : Optional[Any] ):
        '''Every supported Karras-sigma scheduler should yield same-shaped outputs.'''
        lowercase_ : Union[str, Any] = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        lowercase_ : Optional[int] = self.get_dummy_components()
        lowercase_ : int = self.pipeline_class(**a )
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=a )
        pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        lowercase_ : Dict = self.get_dummy_inputs(a )
        lowercase_ : int = 2
        lowercase_ : Dict = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue
            lowercase_ : Optional[int] = getattr(a , scheduler_enum.name )
            lowercase_ : Union[str, Any] = scheduler_cls.from_config(pipe.scheduler.config )
            lowercase_ : Tuple = pipe(**a )[0]
            outputs.append(a )
        assert check_same_shape(a )
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineSlowTests(unittest.TestCase):
    """GPU integration tests comparing upscaler output against reference images.

    NOTE(review): in the original, all three methods shared one mangled name
    (so only the last survived), ``tearDown`` was therefore never registered,
    the dtype was the nonexistent ``torch.floataa``, and the bodies read the
    undefined local ``a``; conventional names are restored below.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        """Text -> latent with SD v1-4, then upscale; mean error vs. reference < 5e-2."""
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        """Upscale a 512px input image; max error vs. reference < 5e-2."""
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        low_res_img = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=low_res_img,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
| 620 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
# Configure HuggingFace logging: INFO verbosity, default stdout handler,
# explicit (timestamped) record format.
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    """Load CSV splits and turn them into tokenized `tf.data.Dataset`s.

    Supports single-text and text-pair classification (1 or 2 feature
    columns besides the label). Returns ``(train_ds, val_ds, test_ds,
    label2id)``; a split's dataset is ``None`` when its file is ``None``.

    NOTE(review): the original bound every local to the same mangled name
    (`a__`) and then read undefined identifiers throughout, and was defined
    under a name different from `get_tfds`, which `main` calls; restored.
    """
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    # The label column is removed from the feature list; the rest are inputs.
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
# Module-level logger used by `main` below.
# NOTE(review): this was bound to a mangled name while the rest of the file
# logs through `logger`, which would raise NameError; restored.
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval.

    NOTE(review): the original bound every field to the same mangled name
    `_lowercase` (without annotations, so none were dataclass fields) and used
    the class's own mangled name as the default value; `main` reads the
    conventional attribute names restored here.
    """

    # Index of the CSV column that holds the label.
    label_column_id: int = field(metadata={'help': 'Which column contains the label'})
    train_file: Optional[str] = field(default=None, metadata={'help': 'The path of the training file'})
    dev_file: Optional[str] = field(default=None, metadata={'help': 'The path of the development file'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'The path of the test file'})
    max_seq_length: int = field(
        default=128,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}
    )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.

    NOTE(review): field names restored from the mangled `_lowercase`
    placeholders; `main` reads these conventional attribute names.
    """

    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    use_fast: bool = field(default=False, metadata={'help': 'Set this flag to use fast tokenization.'})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )
def main():
    """Fine-tune and evaluate a TF sequence-classification model on CSV data.

    NOTE(review): the original bound the parsed-argument tuple to a single
    mangled name and then read undefined `model_args`/`data_args`/
    `training_args`, used the nonexistent attribute `fpaa`, and was defined
    under a name different from `main`, which the entry guard calls; restored.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, '''
        f'''16-bits training: {training_args.fp16}'''
    )
    logger.info(f'''Training/evaluation parameters {training_args}''')

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        # Simple accuracy over the argmax predictions.
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f'''  {key} = {value}''')
                writer.write(f'''{key} = {value}\n''')

            results.update(result)

    return results
if __name__ == "__main__":
    # Standard script entry point.
    main()
| 719 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
# Configure HuggingFace logging: INFO verbosity, default stdout handler,
# explicit (timestamped) record format.
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    """Load CSV splits and turn them into tokenized `tf.data.Dataset`s.

    Supports single-text and text-pair classification (1 or 2 feature
    columns besides the label). Returns ``(train_ds, val_ds, test_ds,
    label2id)``; a split's dataset is ``None`` when its file is ``None``.

    NOTE(review): the original bound every local to the same mangled name
    (`a__`) and then read undefined identifiers throughout, and was defined
    under a name different from `get_tfds`, which `main` calls; restored.
    """
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    # The label column is removed from the feature list; the rest are inputs.
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
# Module-level logger used by `main` below.
# NOTE(review): this was bound to a mangled name while the rest of the file
# logs through `logger`, which would raise NameError; restored.
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval.

    NOTE(review): the original bound every field to the same mangled name
    `_lowercase` (without annotations, so none were dataclass fields) and used
    the class's own mangled name as the default value; `main` reads the
    conventional attribute names restored here.
    """

    # Index of the CSV column that holds the label.
    label_column_id: int = field(metadata={'help': 'Which column contains the label'})
    train_file: Optional[str] = field(default=None, metadata={'help': 'The path of the training file'})
    dev_file: Optional[str] = field(default=None, metadata={'help': 'The path of the development file'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'The path of the test file'})
    max_seq_length: int = field(
        default=128,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}
    )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.

    NOTE(review): field names restored from the mangled `_lowercase`
    placeholders; `main` reads these conventional attribute names.
    """

    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    use_fast: bool = field(default=False, metadata={'help': 'Set this flag to use fast tokenization.'})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )
def main():
    """Fine-tune and evaluate a TF sequence-classification model on CSV data.

    NOTE(review): the original unpacked the parsed-argument tuple into three
    identical mangled names and then read undefined `model_args`/`data_args`/
    `training_args`, used the nonexistent attribute `fpaa`, and was defined
    under a name different from `main`, which the entry guard calls; restored.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, '''
        f'''16-bits training: {training_args.fp16}'''
    )
    logger.info(f'''Training/evaluation parameters {training_args}''')

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        # Simple accuracy over the argmax predictions.
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f'''  {key} = {value}''')
                writer.write(f'''{key} = {value}\n''')

            results.update(result)

    return results
if __name__ == "__main__":
    # Standard script entry point.
    main()
| 151 | 0 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
# Module-level logger (transformers' logging wrapper); name is mangled but
# nothing in the visible code reads it, so it is left as-is.
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class _UpperCamelCase( __lowerCamelCase ):
    # Keys this image processor emits in its BatchFeature output.
    # NOTE(review): class and base names are mangled — the base is presumably
    # BaseImageProcessor (imported above); confirm before renaming.
    __SCREAMING_SNAKE_CASE : Optional[Any] = ['''pixel_values''']
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , SCREAMING_SNAKE_CASE__ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **SCREAMING_SNAKE_CASE__ : int , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE__ )
__a : Union[str, Any] = size if size is not None else {'shortest_edge': 2_2_4}
__a : Tuple = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
__a : int = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
__a : Dict = get_size_dict(SCREAMING_SNAKE_CASE__ , param_name='crop_size' )
__a : Optional[int] = do_resize
__a : Dict = size
__a : Union[str, Any] = resample
__a : str = do_center_crop
__a : Optional[int] = crop_size
__a : Any = do_rescale
__a : Optional[int] = rescale_factor
__a : int = do_normalize
__a : int = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__a : Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, int] , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Tuple , ):
'''simple docstring'''
__a : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__a : Optional[int] = int((2_5_6 / 2_2_4) * size['shortest_edge'] )
__a : Dict = get_resize_output_image_size(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
__a : str = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' )
return resize(
SCREAMING_SNAKE_CASE__ , size=(size_dict['height'], size_dict['width']) , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, int] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : int , ):
'''simple docstring'''
__a : str = get_size_dict(SCREAMING_SNAKE_CASE__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dict must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(SCREAMING_SNAKE_CASE__ , size=(size['height'], size['width']) , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Union[int, float] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Tuple , ):
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Union[float, List[float]] , SCREAMING_SNAKE_CASE__ : Union[float, List[float]] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Dict , ):
'''simple docstring'''
return normalize(SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : ImageInput , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, int]] = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, int]] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[float] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, Iterable[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, Iterable[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[TensorType] = None , SCREAMING_SNAKE_CASE__ : ChannelDimension = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ : Dict , ):
'''simple docstring'''
__a : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
__a : Optional[int] = resample if resample is not None else self.resample
__a : str = do_center_crop if do_center_crop is not None else self.do_center_crop
__a : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
__a : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
__a : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
__a : Optional[int] = image_mean if image_mean is not None else self.image_mean
__a : List[Any] = image_std if image_std is not None else self.image_std
__a : int = size if size is not None else self.size
__a : int = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
__a : Optional[int] = crop_size if crop_size is not None else self.crop_size
__a : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE__ , param_name='crop_size' )
__a : Optional[int] = make_list_of_images(SCREAMING_SNAKE_CASE__ )
if not valid_images(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__a : Optional[int] = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images]
if do_resize:
__a : Tuple = [self.resize(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
if do_center_crop:
__a : Optional[Any] = [self.center_crop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
if do_rescale:
__a : Optional[int] = [self.rescale(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
if do_normalize:
__a : Optional[Any] = [self.normalize(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
__a : Tuple = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
__a : Optional[int] = {'pixel_values': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
| 47 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCAmelCase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for the CodeGen slow and fast tokenizers.

    NOTE(review): the original block reused a single obfuscated identifier for
    every class attribute and method (shadowing all but the last) and used
    duplicate parameter names (``*a__, **a__`` — a SyntaxError).  Names are
    restored from the call sites still present in the bodies
    (``self.get_tokenizer``, ``self.rust_tokenizer_class``,
    ``self.tokenizers_list``, ``self.vocab_file`` …) and from the
    TokenizerTesterMixin conventions — confirm against the mixin.
    """

    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        # The original assigned these to throwaway locals, so the attributes
        # the other tests read (vocab_file / merges_file / special_tokens_map)
        # were never actually set on the instance.
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        # Slow tokenizer loaded from the toy vocab written in setUp.
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        # Fast tokenizer loaded from the same toy vocab.
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = "lower newer"
        # Tokenization must agree between slow and fast implementations.
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        # Conversion to ids without special tokens.
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # Conversion to ids with special tokens.
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # The unknown token maps to the same id in both tokenizers.
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Padding to max_length without a pad token must raise.
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")
        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")
        # s: single string, max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])
        # s2: automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])
        # p: single pair, max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])
        # p2: automatic padding of pairs
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)
        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)
        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))
        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)
        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"
        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    def test_padding_different_model_input_name(self):
        # Intentionally a no-op override; the inherited check does not apply
        # here.  NOTE(review): original method name lost to obfuscation —
        # confirm against TokenizerTesterMixin.
        pass
| 569 | 0 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    # Imported only for static type checking: the annotations below reference
    # "sqlite3.Connection" and sqlalchemy types as strings.  The obfuscated
    # `import sqlitea` referred to a nonexistent module.
    import sqlite3
    import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    """Input stream that builds a :class:`Dataset` from a SQL query or table.

    NOTE(review): the original class was named ``__A`` and immediately
    shadowed by the writer class of the same name, so it was unreachable;
    renaming it is therefore backward-compatible.  Its base class referenced
    an undefined name — ``AbstractDatasetInputStream`` (imported above) is the
    only importable candidate.
    """

    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        """Download/prepare the builder and return the ``train`` split.

        NOTE(review): method name restored from the abstract input-stream
        convention — confirm against ``AbstractDatasetInputStream``.
        """
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    """Write a :class:`Dataset` to a SQL table in batches, optionally in parallel.

    NOTE(review): the original ``__init__`` reused one parameter name for every
    argument (a SyntaxError) and all three methods shared one name; names are
    restored from the attribute assignments and call sites
    (``self._batch_sql`` / ``self._write`` appear in the bodies).
    """

    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        """Write the whole dataset; returns the number of rows written."""
        # "sql" and "con" are constructor-level concerns; strip them (and the
        # pandas "index" flag) so they are not forwarded twice to df.to_sql.
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        """Write one slice of the dataset; ``args`` is (offset, index, to_sql_kwargs)."""
        offset, index, to_sql_kwargs = args
        # Only the first batch may create/replace the table; later batches append.
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        # Some SQL drivers return None from to_sql; fall back to the batch length.
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        """Dispatch batches serially or via a multiprocessing pool."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows
        return written


# Backward-compatible alias: the obfuscated module exposed the writer as __A.
__A = SqlDatasetWriter
| 367 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def lowerCAmelCase_(*args, take_from=None, standard_warn=True, stacklevel=2):
    """Deprecation helper.

    Each positional argument is an ``(attribute, version_name, message)``
    tuple.  If ``take_from`` is a dict (typically ``kwargs``) the deprecated
    key is popped and its value returned; if it is an object the attribute is
    read instead.  A warning is emitted for every deprecated item still in
    use, and a ValueError is raised if the library version has already passed
    ``version_name`` (the entry should then be deleted).

    NOTE(review): the obfuscated signature reused one parameter name four
    times (a SyntaxError); keyword names restored from the body's usage —
    confirm against the upstream helper.
    """
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    # Allow calling with a single (attr, version, message) tuple unwrapped.
    if not isinstance(args[0], tuple):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )
        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."
        if warning is not None:
            warning = warning + " " if standard_warn else ""
            # FutureWarning is the conventional category for user-facing
            # deprecations; the obfuscated code passed an undefined name here.
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        # `filename` was computed but dropped from the message in the
        # obfuscated version ("(unknown)"); restore it.
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")
    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
| 367 | 1 |
'''simple docstring'''
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class __magic_name__(TestCase):
    """Unit tests for ``TypedSequence`` type inference and casting.

    NOTE(review): every method originally shared one obfuscated name, so all
    but the last were shadowed and never ran; the base class referenced an
    undefined name (``TestCase`` is imported above).  ``pa.intaa`` /
    ``np.uinta`` are not real pyarrow/numpy attributes and are mapped back to
    concrete dtypes — confirm the int32/int64 split against the assertions'
    intent.  The file-local names ``ArrayaD``/``ArrayaDExtensionType`` are
    kept as imported.
    """

    def test_no_type(self):
        # Default inference for a plain int list is int64.
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        # pa.array's own `type` must not be combined with TypedSequence.
        with self.assertRaises(ValueError):
            pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        # try_type and type are mutually exclusive.
        with self.assertRaises(ValueError):
            pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_try_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_try_incompatible_type(self):
        # try_type falls back to inference (string) instead of raising.
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=ArrayaD((1, 3), "int64")))
        self.assertEqual(arr.type, ArrayaDExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            pa.array(TypedSequence(["foo", "bar"], type=ArrayaD((1, 3), "int64")))

    def test_try_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=ArrayaD((1, 3), "int64")))
        self.assertEqual(arr.type, ArrayaDExtensionType((1, 3), "int64"))

    def test_try_incompatible_extension_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=ArrayaD((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            # Image columns must disable optimize_list_casting on the last cast.
            _, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])
def _check_output(output, expected_num_chunks: int):
    """Re-read a finalized Arrow stream and verify its chunking and content.

    ``output`` is either a pa.Buffer (read in memory) or a file path (memory
    mapped).  Name restored from the ``_check_output(...)`` call sites below.
    The obfuscated version opened the raw input instead of the reader it had
    just built.
    """
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    # Parametrize names must match the function's parameter names for pytest.
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write(fields, writer_batch_size):
    """Writing two examples produces the expected schema and chunking."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        # Without an explicit schema the writer infers string/int64.
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_with_features():
    """Features passed to the writer are preserved in the output schema."""
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    # Round-trip: the schema metadata reconstructs the original Features.
    assert features == Features.from_arrow_schema(schema)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
def test_key_datatype(writer_batch_size):
    """Non-hashable keys (a list) are rejected when duplicate checking is on."""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_duplicate_keys(writer_batch_size):
    """Writing two examples under the same key raises DuplicatedKeysError."""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1}, key=10)
            writer.write({"col_1": "bar", "col_2": 2}, key=10)
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_write_with_keys(writer_batch_size):
    """Distinct keys write cleanly with duplicate checking enabled."""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        writer.write({"col_1": "foo", "col_2": 1}, key=1)
        writer.write({"col_1": "bar", "col_2": 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_batch(fields, writer_batch_size):
    """write_batch accepts columnar dicts, including an empty batch."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
        writer.write_batch({"col_1": [], "col_2": []})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_table(fields, writer_batch_size):
    """write_table accepts a whole pyarrow Table."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_row(fields, writer_batch_size):
    """write_row accepts one-row pyarrow Tables."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_file():
    """ArrowWriter writes a valid, checkable file when given a filesystem path instead of a stream."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
        num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)
def get_base_dtype(arr_type):
    """Return the innermost (non-list) pyarrow dtype of ``arr_type``.

    Recurses through nested list types, e.g. list<list<int64>> -> int64.
    Named ``get_base_dtype`` because that is the name every call site in this
    file uses.
    """
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type
def change_first_primitive_element_in_list(lst, value):
    """Replace the first primitive (non-list) element of a possibly nested list, in place.

    Recurses into the first element while it is itself a list, then assigns
    ``value``. Named to match the call site elsewhere in this file.
    """
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    """TypedSequence honours an explicit optimized_int_type, defaulting to int64."""
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype
@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int64()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    """OptimizedTypedSequence picks a compact dtype per column name, but falls back to int64
    when a value does not fit in the optimized dtype's range."""
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype

    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
@pytest.mark.parametrize("raise_exception", [False, True])
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    """The writer's underlying stream is closed whether the body exits normally or raises."""
    path = str(tmp_path / "dataset-train.arrow")
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed
def test_arrow_writer_with_filesystem(mockfs):
    """ArrowWriter resolves fsspec-style paths and writes through the mock filesystem."""
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)
def test_parquet_writer_write():
    """ParquetWriter round-trip: written examples can be read back with pq.read_table."""
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def UpperCamelCase(tmp_path, embed_local_files):
    """With embed_local_files=True the image bytes are stored in the parquet file;
    otherwise only the local path is stored and bytes stay None."""
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None
def test_always_nullable():
    """ArrowWriter drops non-nullable flags: the built schema is always nullable."""
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)
    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
| 72 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case__(ProcessorMixin):
    r"""
    Processor combining a LayoutLMv2 image processor and a LayoutXLM tokenizer
    into a single processor for document understanding.

    If the image processor runs OCR (``apply_ocr=True``), the words and boxes
    it extracts are fed to the tokenizer; otherwise the caller must provide
    ``boxes`` (and optionally ``word_labels``) themselves.
    """

    # ProcessorMixin reads these class attributes to build/save the components.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Run the image processor (optionally with OCR), then the tokenizer, and
        merge the pixel values into the tokenizer's output under the "image" key."""
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        """In case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image."""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 541 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import table: maps submodule name -> public names it provides.
# Optional entries are added below only when their backend is available.
_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for type checkers only; at runtime the module is lazy.
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 319 |
'''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
__lowerCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase__(FeatureExtractionMixin):
    """Feature extractor that turns raw HTML strings into lists of text nodes
    and their XPath expressions, using BeautifulSoup (bs4)."""

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        """Walk up from a soup element and return (tag names, 1-based sibling
        subscripts) from the root down to the element; subscript 0 means the
        tag is an only child of its kind."""
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bsa.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        """Parse one HTML string and return parallel lists of doc strings,
        per-string xpath tag sequences, and per-string subscript sequences."""
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        stringaxtag_seq = []
        stringaxsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bsa.element.NavigableString:
                if type(element.parent) != bsa.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                stringaxtag_seq.append(xpath_tags)
                stringaxsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(stringaxtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(stringaxsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, stringaxtag_seq, stringaxsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        """Build an XPath string like "/html/body/div[2]" from parallel tag/subscript lists."""
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        """Extract nodes and xpaths from one HTML string or a batch of them.

        Returns a BatchFeature with keys "nodes" and "xpaths", each a list
        (one entry per input string) of per-node values.
        """
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, stringaxtag_seq, stringaxsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, stringaxtag_seq, stringaxsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
| 319 | 1 |
class Graph:
    """Directed graph stored as an adjacency dict: vertex -> list of neighbours.

    Renamed to ``Graph`` because the demo code below instantiates ``Graph()``.
    DFS assumes vertices are the integers 0..n-1.
    """

    def __init__(self):
        self.vertex = {}

    def print_graph(self) -> None:
        """Print the adjacency dict, then one 'vertex -> neighbours' line per vertex."""
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        """Add a directed edge from `from_vertex` to `to_vertex`."""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        """Depth-first traversal over all vertices, printing each exactly once."""
        # visited flags, indexed by vertex number (assumes vertices 0..n-1)
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3

    print("DFS:")
    g.dfs()
    # DFS:
    # 0 1 2 3
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    """Build (encoder, decoder) HF configs mirroring an original Donut model's hyperparameters.

    Renamed to ``get_configs`` because that is the name the conversion entry
    point below calls.
    """
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        # the Donut decoder's tokenizer carries extra special tokens, so size the vocab from it
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config
def rename_key(name):
    """Map an original Donut/Swin state-dict key to the HF naming scheme.

    Pure string rewriting; renamed to ``rename_key`` to match its call site
    in ``convert_state_dict``.
    """
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name
def convert_state_dict(orig_state_dict, model):
    """Rename/split original Donut weights in place so they match the HF model's parameter names.

    Fused qkv projections are split into separate query/key/value tensors;
    unused buffers and the encoder's final LayerNorm are dropped.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original Donut checkpoint to the HF VisionEncoderDecoder format.

    Loads the original model, rebuilds it as DonutSwin + MBart, copies the
    weights, then sanity-checks embeddings / encoder states / logits against
    the original before optionally saving and pushing to the hub.
    """
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    # NOTE(review): third positional arg to the original model is presumed to be
    # decoder labels (None here) — confirm against the donut package signature.
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="naver-clova-ix/donut-base-finetuned-docvqa",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )

    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 487 | 0 |
'''simple docstring'''
from __future__ import annotations
class UpperCAmelCase__:
    """N-th order IIR (infinite impulse response) digital filter.

    Implements the direct-form difference equation
        a0*y[n] = b0*x[n] + b1*x[n-1] + ... - a1*y[n-1] - ...
    Coefficients default to the identity filter (b = a = [1, 0, ..., 0]).
    """

    def __init__(self, order):
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs, b_coeffs):
        """Set the filter coefficients; a leading a0 = 1.0 is prepended when a_coeffs is short."""
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            a_coeffs_error = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(a_coeffs_error)

        if len(b_coeffs) != self.order + 1:
            b_coeffs_error = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(b_coeffs_error)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample):
        """Feed one input sample through the filter and return the output sample."""
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        # shift histories one step and store the newest sample/output at index 0
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result

        return result
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCAmelCase__(unittest.TestCase):
    """Tests for AlignProcessor: saving/loading, and agreement of the combined
    processor with its standalone tokenizer and image processor."""

    def setUp(self):
        # Write a tiny WordPiece vocab and an image-processor config to a temp dir
        # so the components can be loaded with from_pretrained.
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with a single random PIL image (moveaxis converts CHW -> HWC)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 438 | 0 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : List[str] = logging.get_logger(__name__)
def a_(checkpoint_repo: str, pytorch_dump_folder_path: str) -> None:
    """Convert an `efficient_mlm` RoBERTa-PreLayerNorm checkpoint to HF format.

    Args:
        checkpoint_repo: hub repo id of the original checkpoint,
            e.g. 'andreasmadsen/efficient_mlm_m0.40'.
        pytorch_dump_folder_path: directory that receives the converted
            model weights and tokenizer files.
    """
    # FIX(review): the obfuscated original duplicated the parameter name
    # (a SyntaxError) and collapsed every local into `__lowerCAmelCase`;
    # names are restored from the control flow that is still visible.
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=['RobertaPreLayerNormForMaskedLM'] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename='pytorch_model.bin' ) )
    new_state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
        if tensor_key.startswith('roberta.' ):
            tensor_key = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
            continue
        new_state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=checkpoint_repo, config=config, state_dict=new_state_dict )
    model.save_pretrained(pytorch_dump_folder_path )
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo )
    tokenizer.save_pretrained(pytorch_dump_folder_path )


# The __main__ block below calls the function by its conventional name.
convert_roberta_prelayernorm_checkpoint_to_pytorch = a_
if __name__ == "__main__":
    # CLI entry point for the checkpoint conversion above.
    # NOTE(review): obfuscation damage — the parser was bound to `_snake_case`,
    # so the `parser.add_argument(...)` and `args.*` references below are
    # unbound names at runtime; confirm against the upstream script.
    _snake_case : str = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint-repo',
        default=None,
        type=str,
        required=True,
        help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    _snake_case : Union[str, Any] = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 53 |
import math
def a_(arr: list, x: int) -> int:
    """Jump search over a sorted list.

    Returns the index of `x` in `arr`, or -1 when absent (including for
    an empty list). Runs in O(sqrt(n)): jump in sqrt(n)-sized strides,
    then scan linearly inside the identified stride.
    """
    # FIX(review): the obfuscated original duplicated the parameter name
    # (a SyntaxError) and left `n`/`step`/`prev` unbound; also added an
    # explicit empty-list guard (the original would index arr[-1]).
    n = len(arr)
    if n == 0:
        return -1
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    # Jump forward while the end of the current stride is still below x.
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    # Linear scan within the stride [prev, min(step, n)).
    while arr[prev] < x:
        prev += 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1


# The __main__ block below calls the function by its conventional name.
jump_search = a_
if __name__ == "__main__":
    # Interactive driver for jump search.
    # NOTE(review): obfuscation damage — all locals were collapsed into
    # `_snake_case`, so `user_input`, `arr`, `x` and `res` below are unbound.
    _snake_case : List[str] = input('Enter numbers separated by a comma:\n').strip()
    _snake_case : Optional[Any] = [int(item) for item in user_input.split(',')]
    _snake_case : List[str] = int(input('Enter the number to be searched:\n'))
    _snake_case : Optional[int] = jump_search(arr, x)
    if res == -1:
        print('Number not found!')
    else:
        print(F"""Number {x} is at index {res}""")
| 53 | 1 |
"""simple docstring"""
def UpperCAmelCase(a: str, b: str) -> bool:
    """Return True if `a` can be turned into the abbreviation `b`.

    Allowed moves: capitalize any lowercase letter of `a`, then delete all
    remaining lowercase letters. Uppercase letters of `a` must match `b`
    exactly (they can never be deleted).

    Classic 'Abbreviation' DP: dp[i][j] is True when the first i characters
    of `a` can produce the first j characters of `b`.
    """
    # FIX(review): the obfuscated original duplicated the parameter name
    # (a SyntaxError) and referenced the undefined `lowerCamelCase__`.
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Capitalize a[i] to consume b[j].
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # Delete a[i] — only lowercase letters may be deleted.
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
    # Run any doctests defined in this module when executed as a script.
    import doctest
    doctest.testmod()
| 715 | """simple docstring"""
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class UpperCamelCase__( unittest.TestCase ):
    """Minimal tester object that supplies the (empty) feature-extractor
    kwargs dict to the MarkupLM feature-extraction tests below."""

    def __init__(self, parent) -> None:
        # FIX(review): the obfuscated original bound `parent` to a throwaway
        # local instead of storing it; testers conventionally keep a
        # reference to the parent test case.
        self.parent = parent

    def snake_case__(self) -> dict:
        """Return the feature-extractor constructor kwargs (none needed)."""
        return {}


# FIX(review): the test class below instantiates this tester under its
# conventional name, which the obfuscation removed — restore the binding.
MarkupLMFeatureExtractionTester = UpperCamelCase__
def UpperCAmelCase():
    """Return two distinct sample HTML documents used by the tests below.

    FIX(review): the obfuscated original assigned both documents to the same
    variable, so the second overwrote the first and the function returned the
    same document twice — while the tests index [0] and [1] expecting the
    'sample document' page and the 'My First Heading' page respectively.
    """
    html_string_a = '<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'
    html_string_b = '\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '
    return [html_string_a, html_string_b]


# The test class below calls this helper by its conventional name.
get_html_strings = UpperCAmelCase
@require_bsa
class UpperCamelCase__( __A , unittest.TestCase ):
    # Tests for MarkupLMFeatureExtractor: extracting text nodes and their
    # xpaths from raw HTML, for single and batched inputs.
    # NOTE(review): obfuscation damage — the mixin base `__A`, the helper
    # `get_html_strings`, the tester name `MarkupLMFeatureExtractionTester`,
    # and locals `feature_extractor`/`encoding`/`expected_nodes`/
    # `expected_xpaths` (collapsed into `A__`) are all unbound as written.
    lowerCAmelCase__ : Dict = MarkupLMFeatureExtractor if is_bsa_available() else None
    def snake_case__ ( self ) -> Dict:
        A__ = MarkupLMFeatureExtractionTester(self )
    @property
    def snake_case__ ( self ) -> Optional[Any]:
        return self.feature_extract_tester.prepare_feat_extract_dict()
    def snake_case__ ( self ) -> Any:
        # Initialize feature_extractor
        A__ = self.feature_extraction_class()
        # Test not batched input
        A__ = get_html_strings()[0]
        A__ = feature_extractor(__UpperCAmelCase )
        # fmt: off
        A__ = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
        A__ = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
        # fmt: on
        self.assertEqual(encoding.nodes ,__UpperCAmelCase )
        self.assertEqual(encoding.xpaths ,__UpperCAmelCase )
        # Test batched
        A__ = get_html_strings()
        A__ = feature_extractor(__UpperCAmelCase )
        # fmt: off
        A__ = expected_nodes + [['My First Heading', 'My first paragraph.']]
        A__ = expected_xpaths + [['/html/body/h1', '/html/body/p']]
        self.assertEqual(len(encoding.nodes ) ,2 )
        self.assertEqual(len(encoding.xpaths ) ,2 )
        self.assertEqual(encoding.nodes ,__UpperCAmelCase )
        self.assertEqual(encoding.xpaths ,__UpperCAmelCase )
| 536 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__)
# FIX(review): the obfuscation collapsed every module constant into
# `SCREAMING_SNAKE_CASE`, but the tokenizer class below still reads the
# conventional names (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, logger). Restore those bindings
# immediately after each assignment, before it is overwritten.
logger = SCREAMING_SNAKE_CASE
SCREAMING_SNAKE_CASE :Union[str, Any] = '▁'
SPIECE_UNDERLINE = SCREAMING_SNAKE_CASE
SCREAMING_SNAKE_CASE :Optional[int] = {'vocab_file': 'spiece.model'}
VOCAB_FILES_NAMES = SCREAMING_SNAKE_CASE
SCREAMING_SNAKE_CASE :Dict = {
    'vocab_file': {
        'google/reformer-crime-and-punishment': (
            'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
        )
    }
}
PRETRAINED_VOCAB_FILES_MAP = SCREAMING_SNAKE_CASE
SCREAMING_SNAKE_CASE :Optional[int] = {
    'google/reformer-crime-and-punishment': 52_4288,
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = SCREAMING_SNAKE_CASE
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
    """SentencePiece-backed tokenizer (Reformer crime-and-punishment vocab).

    NOTE(review): obfuscation damage throughout — every `def` below repeats
    the parameter name `A` (a SyntaxError in CPython), and the method bodies
    reference the pre-obfuscation locals (`sp_model_kwargs`, `vocab_file`,
    `vocab`, `state`, `d`, `index`, `token`, `tokens`, `current_sub_tokens`,
    `out_string`, `save_directory`, `filename_prefix`, `out_vocab_file`)
    which are no longer bound. Left byte-identical; confirm against the
    upstream ReformerTokenizer before attempting repair.
    """
    snake_case_ = VOCAB_FILES_NAMES
    snake_case_ = PRETRAINED_VOCAB_FILES_MAP
    snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case_ = ["input_ids", "attention_mask"]
    def __init__( self : Tuple ,A : Any ,A : Any="</s>" ,A : Tuple="<unk>" ,A : Any=[] ,A : Optional[Dict[str, Any]] = None ,**A : List[Any] ,):
        # Loads the SentencePiece model from the given vocab file.
        __A = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=A ,unk_token=A ,additional_special_tokens=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,)
        __A = vocab_file
        __A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(A )
    @property
    def UpperCamelCase_ ( self : Optional[int] ):
        # Vocabulary size, delegated to the SentencePiece model.
        return self.sp_model.get_piece_size()
    def UpperCamelCase_ ( self : List[str] ):
        # token -> id mapping, including tokens added after training.
        __A = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self : List[Any] ):
        # Drop the (unpicklable) SentencePiece processor before pickling.
        __A = self.__dict__.copy()
        __A = None
        return state
    def __setstate__( self : str ,A : Any ):
        # Rebuild the SentencePiece processor after unpickling.
        __A = d
        # for backward compatibility
        if not hasattr(self ,"sp_model_kwargs" ):
            __A = {}
        __A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def UpperCamelCase_ ( self : int ,A : str ):
        # Tokenize text into SentencePiece pieces.
        return self.sp_model.encode(A ,out_type=A )
    def UpperCamelCase_ ( self : str ,A : Tuple ):
        # Map a piece (token string) to its id.
        return self.sp_model.piece_to_id(A )
    def UpperCamelCase_ ( self : str ,A : Optional[Any] ):
        # Map an id back to its piece, when within the model's range.
        if index < self.sp_model.get_piece_size():
            __A = self.sp_model.IdToPiece(A )
        return token
    def UpperCamelCase_ ( self : List[Any] ,A : List[str] ):
        # Join pieces back into a string, decoding special tokens verbatim.
        __A = []
        __A = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(A ) + token
                __A = []
            else:
                current_sub_tokens.append(A )
        out_string += self.sp_model.decode(A )
        return out_string.strip()
    def UpperCamelCase_ ( self : List[Any] ,A : str ,A : Optional[str] = None ):
        # Write the SentencePiece vocab file into the given directory.
        if not os.path.isdir(A ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        __A = os.path.join(
            A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,A )
        elif not os.path.isfile(self.vocab_file ):
            with open(A ,"wb" ) as fi:
                __A = self.sp_model.serialized_model_proto()
                fi.write(A )
        return (out_vocab_file,)
| 55 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_(s: str, pattern: str) -> list:
    """Naive substring search: return every index in `s` where `pattern` starts.

    O(len(s) * len(pattern)) character-by-character comparison; an empty or
    over-long pattern window simply yields an empty result list.
    """
    # FIX(review): the obfuscated original duplicated the parameter name
    # (a SyntaxError) and collapsed `pat_len`/`position`/`match_found`
    # into `_a`; names restored from the visible control flow.
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


# The self-test below calls the function by its conventional name.
naive_pattern_search = SCREAMING_SNAKE_CASE_

if __name__ == "__main__":
    assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
    print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
| 694 | 0 |
from scipy.stats import pearsonr
import datasets
a : List[Any] = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
a : List[Any] = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
a : int = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
'''simple docstring'''
def A ( self ) -> List[str]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def A ( self , snake_case_ , snake_case_ , snake_case_=False ) -> Dict:
'''simple docstring'''
if return_pvalue:
__lowercase = pearsonr(snake_case_ , snake_case_ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(snake_case_ , snake_case_ )[0] )}
| 527 |
import unittest
from knapsack import knapsack as k
class lowerCamelCase_ ( unittest.TestCase ):
    """Unit tests for the knapsack module's `knapsack(cap, w, val, n)` helper.

    NOTE(review): obfuscation damage — the locals (capacity, weights,
    values, count) were collapsed into `__lowercase`, so the `snake_case_`
    arguments passed to `k.knapsack` below are unbound names.
    """
    def A ( self ) -> Optional[Any]:
        # Base cases: zero capacity, and a single item that does not fit.
        __lowercase = 0
        __lowercase = [0]
        __lowercase = [0]
        __lowercase = len(snake_case_ )
        self.assertEqual(k.knapsack(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) , 0 )
        __lowercase = [6_0]
        __lowercase = [1_0]
        __lowercase = len(snake_case_ )
        self.assertEqual(k.knapsack(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) , 0 )
    def A ( self ) -> Tuple:
        # Small instance: capacity 3, expect the optimal value 5.
        __lowercase = 3
        __lowercase = [1, 2, 3]
        __lowercase = [3, 2, 1]
        __lowercase = len(snake_case_ )
        self.assertEqual(k.knapsack(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) , 5 )
    def A ( self ) -> str:
        # Classic textbook instance: capacity 50, expect the optimal value 220.
        __lowercase = 5_0
        __lowercase = [6_0, 1_0_0, 1_2_0]
        __lowercase = [1_0, 2_0, 3_0]
        __lowercase = len(snake_case_ )
        self.assertEqual(k.knapsack(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) , 2_2_0 )
if __name__ == "__main__":
    unittest.main()
| 527 | 1 |
def __snake_case(arr: list) -> list:
    """Pancake sort: sort `arr` ascending and return the sorted list.

    Repeatedly flips the prefix ending at the maximum of the unsorted
    region to its front, then flips the whole unsorted region so that the
    maximum lands at its end. The input list object is not mutated (slices
    rebind a new list each pass).
    """
    # FIX(review): the obfuscated original collapsed `cur`, `mi` and the
    # rebinding of `arr` into a single throwaway name, leaving `cur`/`mi`
    # unbound; names restored from the visible control flow.
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr[0:cur].
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, bringing the maximum to the front.
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the unsorted prefix, moving the maximum to position cur-1.
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


# The __main__ block below calls the function by its conventional name.
pancake_sort = __snake_case
if __name__ == "__main__":
    # FIX(review): restored the local names (the obfuscation collapsed them
    # into `__a`, leaving `user_input`/`unsorted` unbound) and stripped the
    # trailing dataset-table residue (`| 86 |`) that made the final line a
    # SyntaxError.
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(pancake_sort(unsorted))
def a(a: list) -> list:
    """Pancake sort: sort `a` ascending and return the sorted list.

    Same flip-based algorithm as the implementation above; the parameter is
    rebound by slicing each pass, so the caller's list is not mutated.
    """
    # FIX(review): the obfuscated original collapsed `cur`/`mi` and the list
    # rebinding into `SCREAMING_SNAKE_CASE`, leaving `cur`, `mi` and `arr`
    # unbound; names restored from the visible control flow.
    cur = len(a)
    while cur > 1:
        # Find the maximum number in a[0:cur].
        mi = a.index(max(a[0:cur]))
        # Reverse from 0 to mi, bringing the maximum to the front.
        a = a[mi::-1] + a[mi + 1 : len(a)]
        # Reverse the unsorted prefix, moving the maximum to position cur-1.
        a = a[cur - 1 :: -1] + a[cur : len(a)]
        cur -= 1
    return a


# The __main__ block below calls the function by its conventional name.
pancake_sort = a
if __name__ == "__main__":
    # FIX(review): restored the local names (collapsed into
    # `__lowerCAmelCase`, leaving `user_input`/`unsorted` unbound) and
    # stripped the trailing dataset-table residue (`| 201 | 0 |`) that made
    # the final line a SyntaxError.
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(pancake_sort(unsorted))
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _A ( _lowercase ):
    """Class-conditional image-generation diffusion pipeline built from a
    Transformer2D model, a VAE decoder, and a Karras-style scheduler
    (DiT-style pipeline).

    NOTE(review): obfuscation damage throughout — every method repeats the
    parameter name `lowerCamelCase` (a SyntaxError), and the bodies
    reference pre-obfuscation locals (`idalabel`, `label`, `batch_size`,
    `latents`, `latent_model_input`, `class_labels`, `timesteps`, `half`,
    `eps`, `rest`, `noise_pred`, `samples`, …) that were collapsed into
    `__lowercase`. Left byte-identical; confirm against the upstream
    DiTPipeline before attempting repair.
    """
    def __init__( self : str , lowerCamelCase : TransformeraDModel , lowerCamelCase : AutoencoderKL , lowerCamelCase : KarrasDiffusionSchedulers , lowerCamelCase : Optional[Dict[int, str]] = None , ):
        """Register the transformer/vae/scheduler modules and build a
        label-name -> class-id lookup from the optional id2label dict."""
        super().__init__()
        self.register_modules(transformer=lowerCamelCase , vae=lowerCamelCase , scheduler=lowerCamelCase )
        # create a imagenet -> id dictionary for easier use
        __lowercase = {}
        if idalabel is not None:
            for key, value in idalabel.items():
                for label in value.split("," ):
                    __lowercase = int(lowerCamelCase )
            __lowercase = dict(sorted(self.labels.items() ) )
    def _snake_case ( self : int , lowerCamelCase : Union[str, List[str]] ):
        """Map one label name or a list of label names to class ids,
        raising ValueError for any unknown label."""
        if not isinstance(lowerCamelCase , lowerCamelCase ):
            __lowercase = list(lowerCamelCase )
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" )
        return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__( self : Any , lowerCamelCase : List[int] , lowerCamelCase : float = 4.0 , lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase : int = 50 , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , ):
        """Run the denoising loop for the given class ids and decode the
        resulting latents through the VAE; classifier-free guidance is
        applied when guidance_scale > 1."""
        __lowercase = len(lowerCamelCase )
        __lowercase = self.transformer.config.sample_size
        __lowercase = self.transformer.config.in_channels
        __lowercase = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowerCamelCase , device=self.device , dtype=self.transformer.dtype , )
        __lowercase = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
        __lowercase = torch.tensor(lowerCamelCase , device=self.device ).reshape(-1 )
        __lowercase = torch.tensor([1_000] * batch_size , device=self.device )
        __lowercase = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
        # set step values
        self.scheduler.set_timesteps(lowerCamelCase )
        for t in self.progress_bar(self.scheduler.timesteps ):
            if guidance_scale > 1:
                __lowercase = latent_model_input[: len(lowerCamelCase ) // 2]
                __lowercase = torch.cat([half, half] , dim=0 )
            __lowercase = self.scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
            __lowercase = t
            if not torch.is_tensor(lowerCamelCase ):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                __lowercase = latent_model_input.device.type == "mps"
                if isinstance(lowerCamelCase , lowerCamelCase ):
                    __lowercase = torch.floataa if is_mps else torch.floataa
                else:
                    __lowercase = torch.intaa if is_mps else torch.intaa
                __lowercase = torch.tensor([timesteps] , dtype=lowerCamelCase , device=latent_model_input.device )
            elif len(timesteps.shape ) == 0:
                __lowercase = timesteps[None].to(latent_model_input.device )
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            __lowercase = timesteps.expand(latent_model_input.shape[0] )
            # predict noise model_output
            __lowercase = self.transformer(
                lowerCamelCase , timestep=lowerCamelCase , class_labels=lowerCamelCase ).sample
            # perform guidance
            if guidance_scale > 1:
                __lowercase , __lowercase = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                __lowercase , __lowercase = torch.split(lowerCamelCase , len(lowerCamelCase ) // 2 , dim=0 )
                __lowercase = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                __lowercase = torch.cat([half_eps, half_eps] , dim=0 )
                __lowercase = torch.cat([eps, rest] , dim=1 )
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                __lowercase , __lowercase = torch.split(lowerCamelCase , lowerCamelCase , dim=1 )
            else:
                __lowercase = noise_pred
            # compute previous image: x_t -> x_t-1
            __lowercase = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase ).prev_sample
        if guidance_scale > 1:
            __lowercase , __lowercase = latent_model_input.chunk(2 , dim=0 )
        else:
            __lowercase = latent_model_input
        __lowercase = 1 / self.vae.config.scaling_factor * latents
        __lowercase = self.vae.decode(lowerCamelCase ).sample
        __lowercase = (samples / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        __lowercase = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            __lowercase = self.numpy_to_pil(lowerCamelCase )
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=lowerCamelCase )
| 655 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
    # Build a SwinvaConfig from a timm Swin-V2 model name such as
    # 'swinv2_tiny_patch4_window8_256' (size / window / image-size are parsed
    # out of the underscore-separated name; label maps come from the hub).
    # NOTE(review): obfuscation damage — the parameter is `_SCREAMING_SNAKE_CASE`
    # but the body reads `swinva_name`, and all locals (name_split, model_size,
    # img_size, window_size, embed_dim, depths, num_heads, num_classes,
    # idalabel, repo_id, filename, config) were collapsed into `__lowercase`,
    # so they are unbound as written.
    __lowercase = SwinvaConfig()
    __lowercase = swinva_name.split("_" )
    __lowercase = name_split[1]
    if "to" in name_split[3]:
        __lowercase = int(name_split[3][-3:] )
    else:
        __lowercase = int(name_split[3] )
    if "to" in name_split[2]:
        __lowercase = int(name_split[2][-2:] )
    else:
        __lowercase = int(name_split[2][6:] )
    if model_size == "tiny":
        __lowercase = 9_6
        __lowercase = (2, 2, 6, 2)
        __lowercase = (3, 6, 1_2, 2_4)
    elif model_size == "small":
        __lowercase = 9_6
        __lowercase = (2, 2, 1_8, 2)
        __lowercase = (3, 6, 1_2, 2_4)
    elif model_size == "base":
        __lowercase = 1_2_8
        __lowercase = (2, 2, 1_8, 2)
        __lowercase = (4, 8, 1_6, 3_2)
    else:
        __lowercase = 1_9_2
        __lowercase = (2, 2, 1_8, 2)
        __lowercase = (6, 1_2, 2_4, 4_8)
    if "to" in swinva_name:
        __lowercase = (1_2, 1_2, 1_2, 6)
    if ("22k" in swinva_name) and ("to" not in swinva_name):
        __lowercase = 2_1_8_4_1
        __lowercase = "huggingface/label-files"
        __lowercase = "imagenet-22k-id2label.json"
        __lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
        __lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
        __lowercase = idalabel
        __lowercase = {v: k for k, v in idalabel.items()}
    else:
        __lowercase = 1_0_0_0
        __lowercase = "huggingface/label-files"
        __lowercase = "imagenet-1k-id2label.json"
        __lowercase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
        __lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
        __lowercase = idalabel
        __lowercase = {v: k for k, v in idalabel.items()}
    __lowercase = img_size
    __lowercase = num_classes
    __lowercase = embed_dim
    __lowercase = depths
    __lowercase = num_heads
    __lowercase = window_size
    return config
def snake_case_(name: str) -> str:
    """Map a timm Swin-V2 parameter name to its HF Transformers equivalent.

    Applies a fixed sequence of substring substitutions (order matters:
    'attn.proj' must be handled before the bare 'attn' rule), then prefixes
    'swinv2.' for every key except classifier-head keys.
    """
    # FIX(review): the obfuscated original named the parameter
    # `_SCREAMING_SNAKE_CASE` while the body read and rebound the
    # undefined `name`; binding the parameter as `name` restores it.
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name
    return name
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
    # Convert a timm Swin-V2 state_dict into HF layout: drop attention-mask
    # buffers and split each fused qkv weight/bias into query/key/value using
    # the per-block head size read from the HF model.
    # NOTE(review): obfuscation damage — the two parameters share one name
    # (a SyntaxError), and `orig_state_dict` / `val` / `key_split` /
    # `layer_num` / `block_num` / `dim` / `model` (collapsed into
    # `__lowercase`) are unbound. Upstream also writes the split tensors
    # back under renamed dict keys, which the collapse erased entirely.
    for key in orig_state_dict.copy().keys():
        __lowercase = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
        if "mask" in key:
            continue
        elif "qkv" in key:
            __lowercase = key.split("." )
            __lowercase = int(key_split[1] )
            __lowercase = int(key_split[3] )
            # Head size determines where to cut the fused qkv tensor.
            __lowercase = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                __lowercase = val[:dim, :]
                __lowercase = val[dim : dim * 2, :]
                __lowercase = val[-dim:, :]
            else:
                __lowercase = val[:dim]
                __lowercase = val[
                    dim : dim * 2
                ]
                __lowercase = val[-dim:]
        else:
            __lowercase = val
    return orig_state_dict
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
    # End-to-end conversion: load the timm model, convert its state_dict,
    # verify logits on a sample COCO image, then save and push the HF model.
    # NOTE(review): obfuscation damage — the two parameters share one name
    # (a SyntaxError), and `timm_model` / `config` / `model` / `url` /
    # `image_processor` / `image` / `inputs` / `timm_outs` (collapsed into
    # `__lowercase`) are unbound as written.
    __lowercase = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE )
    timm_model.eval()
    __lowercase = get_swinva_config(_SCREAMING_SNAKE_CASE )
    __lowercase = SwinvaForImageClassification(_SCREAMING_SNAKE_CASE )
    model.eval()
    __lowercase = convert_state_dict(timm_model.state_dict() , _SCREAMING_SNAKE_CASE )
    model.load_state_dict(_SCREAMING_SNAKE_CASE )
    __lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg"
    __lowercase = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_" , "-" ) ) )
    __lowercase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
    __lowercase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" )
    __lowercase = timm_model(inputs["pixel_values"] )
    __lowercase = model(**_SCREAMING_SNAKE_CASE ).logits
    # Sanity check: converted model must reproduce the timm logits.
    assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 )
    print(F"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(_SCREAMING_SNAKE_CASE )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
    model.push_to_hub(
        repo_path_or_name=Path(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , organization="nandwalritik" , commit_message="Add model" , )
if __name__ == "__main__":
    # CLI entry point for the Swin-V2 conversion above.
    # NOTE(review): obfuscation damage — the parser/args were bound to
    # `snake_case__`, so `parser.add_argument(...)`, `args.*` and
    # `convert_swinva_checkpoint` (all functions above were renamed to
    # `snake_case_`) are unbound names at runtime.
    snake_case__ : Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--swinv2_name""",
        default="""swinv2_tiny_patch4_window8_256""",
        type=str,
        help="""Name of the Swinv2 timm model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    snake_case__ : str = parser.parse_args()
    convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 655 | 1 |
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
UpperCamelCase__ = {
    '''text_branch''': '''text_model''',
    '''audio_branch''': '''audio_model.audio_encoder''',
    '''attn''': '''attention.self''',
    '''self.proj''': '''output.dense''',
    '''attention.self_mask''': '''attn_mask''',
    '''mlp.fc1''': '''intermediate.dense''',
    '''mlp.fc2''': '''output.dense''',
    '''norm1''': '''layernorm_before''',
    '''norm2''': '''layernorm_after''',
    '''bn0''': '''batch_norm''',
}
# FIX(review): the rename function below iterates KEYS_TO_MODIFY_MAPPING,
# but the obfuscation collapsed this table into `UpperCamelCase__` (which
# the next line then overwrites). Restore the expected binding first.
KEYS_TO_MODIFY_MAPPING = UpperCamelCase__
UpperCamelCase__ = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def a__(checkpoint_path, enable_fusion=False):
    """Instantiate the original (laion) CLAP model from a checkpoint path.

    Returns the (model, model_cfg) pair produced by CLAP's `create_model`.
    FIX(review): the obfuscated original reused one name for both
    parameters — a SyntaxError — so both names were restored per the call
    pattern visible in the converter below.
    """
    model, model_cfg = create_model(
        '''HTSAT-tiny''',
        '''roberta''',
        checkpoint_path,
        precision='''fp32''',
        device='''cuda:0''' if torch.cuda.is_available() else '''cpu''',
        enable_fusion=enable_fusion,
        fusion_type='''aff_2d''' if enable_fusion else None,
    )
    return model, model_cfg


# The converter below calls this helper by its conventional name.
init_clap = a__
def a__(state_dict):
    """Translate an original CLAP state_dict into HF ClapModel naming.

    Applies the KEYS_TO_MODIFY_MAPPING substring renames, renumbers
    `sequential.<i>.` / `_projection.<i>.` layers, and splits each fused
    audio `qkv` tensor into separate query/key/value entries.
    """
    # FIX(review): the obfuscated original collapsed all locals into
    # `UpperCAmelCase__` (leaving `state_dict`, the regex patterns, and the
    # qkv slices unbound) and wrote `if "audio" and "qkv" in key`, which
    # parses as `("audio") and ("qkv" in key)` — i.e. it matched ANY qkv
    # key, not just audio-branch ones. Both are fixed below.
    model_state_dict = {}
    sequential_layers_pattern = R'''.*sequential.(\d+).*'''
    text_projection_pattern = R'''.*_projection.(\d+).*'''
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(F"""sequential.{sequential_layer}.""", F"""layers.{int(sequential_layer)//3}.linear.""")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(F"""_projection.{projecton_layer}.""", F"""_projection.linear{transformers_projection_layer}.""")
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict


# The converter below calls this helper by its conventional name.
rename_state_dict = a__
def a__(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    """Convert a CLAP checkpoint to a HF ClapModel and save model + config.

    FIX(review): the obfuscated original reused one name for all four
    parameters (a SyntaxError) and collapsed every local into
    `UpperCAmelCase__`; names restored from the visible call structure.
    """
    clap_model, model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    # NOTE(review): the collapsed source only shows `... = enable_fusion`;
    # the upstream converter stores it on the audio config — confirm.
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)


# The __main__ block below calls the function by its conventional name.
convert_clap_checkpoint = a__
if __name__ == "__main__":
    # CLI entry point for the CLAP conversion above.
    # NOTE(review): obfuscation damage — the parser/args were bound to
    # `UpperCamelCase__`, so `parser.add_argument(...)` and `args.*` below
    # are unbound names at runtime.
    UpperCamelCase__ = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
    UpperCamelCase__ = parser.parse_args()
    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 75 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __SCREAMING_SNAKE_CASE() -> "argparse.Namespace":
    """Parse the TPU launcher CLI: --num_cores, the training-script path,
    and everything after it (forwarded verbatim to the training script)."""
    # FIX(review): the obfuscated original bound the parser to `__A` while
    # calling `parser.add_argument`, passed the undefined `a__` as
    # `type=`/`nargs=` (restored to `int`/`str`/`REMAINDER` per the help
    # text and the file's imports), and carried an eagerly-evaluated
    # `-> Tuple` annotation with `Tuple` never imported.
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""" ,type=int ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" )
    # positional
    parser.add_argument(
        """training_script""" ,type=str ,help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ) ,)
    # rest from the training program
    parser.add_argument("""training_script_args""" ,nargs=REMAINDER )
    return parser.parse_args()


# `main` below calls this helper by its conventional name.
parse_args = __SCREAMING_SNAKE_CASE
def main():
    """Import the training script as a module and spawn it on the TPU cores.

    Fixes vs. the mangled original: ``importlib.import_module`` was handed the
    undefined name ``a__`` instead of the module stem, and the patched argv
    was bound to a throwaway local instead of ``sys.argv``. Renamed ``main``
    to match the ``main()`` call in the ``__main__`` guard below.
    """
    args = parse_args()

    # Import training_script as a module so xmp can call its ``_mp_fn``.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the training script sees its own arguments plus the
    # core count the launcher was given.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
    # Script entry point.
    # NOTE(review): ``main`` is not defined above — both helper functions were
    # mangled to ``__SCREAMING_SNAKE_CASE`` — so this call fails until the
    # launcher function is restored to the name ``main``.
    main()
| 17 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    """Builds a tiny RoFormer config plus random inputs and runs shape checks
    for every TF RoFormer head model.

    Reconstructed from a mangled original: duplicate ``A_`` parameters were a
    SyntaxError and ``lowerCAmelCase = ...`` had dropped every ``self.``
    attribute write. Method names are restored to the ones the sibling test
    class calls (``prepare_config_and_inputs``, ``create_and_check_model``,
    ...), and the class is renamed to the ``TFRoFormerModelTester`` referenced
    in that class's ``setUp``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # The original hard-coded every size instead of using the arguments;
        # that behavior is preserved (the arguments exist only for signature
        # compatibility with the other model testers).
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerModel(config=config)
        # Exercise dict, list and bare-tensor call conventions.
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        # assumes the bare ``True`` in the mangled source was
        # ``config.is_decoder = True`` (standard for causal-LM checks) — TODO confirm
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        # Repeat each example once per choice: (batch, choices, seq).
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Repackage prepare_config_and_inputs() as (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-harness tests wired to the TF RoFormer model classes.

    Reconstructed from a mangled original: the bases were the undefined name
    ``_lowerCAmelCase`` (the file imports TFModelTesterMixin and
    PipelineTesterMixin above), duplicate ``A_`` parameters were a
    SyntaxError, and ``config_class=A_`` referenced an undefined name where
    the imported ``RoFormerConfig`` belongs.
    """

    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    # assumes the two mangled ``False`` class attributes were
    # ``test_head_masking`` / ``test_onnx`` — TODO confirm against upstream
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Text generation is known to fail for this model/tokenizer combo.
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    """Slow integration check against the published Chinese RoFormer weights.

    Reconstructed from a mangled original in which ``model``, ``input_ids``,
    ``output`` and ``vocab_size`` were all bound to the same throwaway name.
    """

    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    """Checks the sinusoidal positional-embedding layer against known values.

    Reconstructed from a mangled original where the layer, its input and its
    output all shared one local name, leaving ``emba``/``input_ids``
    undefined at the point of use.
    """

    # Absolute tolerance for the float comparisons below.
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        embeddings = emb(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )
        tf.debugging.assert_near(embeddings, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        # Build the layer's weight table, then compare its top-left corner.
        emb([2, 16, 512])
        weights = emb.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    """Checks rotary position embedding application on query/key tensors.

    Reconstructed from a mangled original: ``embed_positions``,
    ``query_layer`` and ``key_layer`` were undefined at their point of use,
    a tuple was unpacked into a single repeated name, and ``tf.floataa`` was
    a typo for ``tf.float32``.
    """

    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        expected_query = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        expected_key = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key, atol=self.tolerance)
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the Euclidean distance between two equal-length vectors.

    :param input_a: first vector.
    :param input_b: second vector.
    :return: sqrt of the summed squared component differences.

    Fix vs. the mangled original: both parameters were named
    ``_SCREAMING_SNAKE_CASE`` (a SyntaxError), and the function is renamed to
    ``euclidean``, the name the similarity-search routine below calls.
    """
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))
def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """For each query row, find the nearest dataset row by Euclidean distance.

    :param dataset: reference vectors, one per row.
    :param value_array: query vectors, one per row.
    :return: a list of ``[nearest_vector_as_list, distance]`` per query.
    :raises ValueError: if the arrays differ in ndim or row length.
    :raises TypeError: on shape or dtype mismatch.

    Fixes vs. the mangled original: duplicate parameter names were a
    SyntaxError, the distance/vector locals were collapsed into one name, and
    the call to ``euclidean`` did not resolve — a local helper makes this
    function self-contained.
    """

    def _euclidean(query, row):
        # Plain Euclidean distance; local so no sibling definition is needed.
        return math.sqrt(sum(pow(a - b, 2) for a, b in zip(query, row)))

    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        # 1-D inputs have no second axis; only a genuine ndim mismatch is an error.
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        # Seed with the first dataset row, then keep the closest seen so far.
        dist = _euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = _euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer
def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the cosine similarity of two vectors (dot product over norms).

    Fix vs. the mangled original: both parameters were named
    ``_SCREAMING_SNAKE_CASE``, which is a SyntaxError in Python.
    """
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
'''simple docstring'''
from ... import PretrainedConfig
# Checkpoint shortcut name -> remote config URL for Nezha.
# Fix: this map was bound to the mangled name ``__snake_case`` while the
# config class below reads ``NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP``.
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class NezhaConfig(PretrainedConfig):
    """Configuration for the Nezha model (``model_type = "nezha"``).

    Reconstructed from a mangled original: duplicate ``UpperCAmelCase__``
    parameters were a SyntaxError, every ``self.`` write had been dropped, and
    the base class was the undefined name ``__lowercase`` where the imported
    ``PretrainedConfig`` belongs. The mangled reference to an archive-map
    class attribute is omitted (it pointed at an undefined name).
    """

    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
| 133 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger. Fix: the mangled original bound both the logger and the
# archive map to ``__snake_case``, so the logger was immediately overwritten.
logger = logging.get_logger(__name__)

# Checkpoint shortcut name -> remote config URL for GPT-BigCode.
GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig(PretrainedConfig):
    """Configuration for the GPT-BigCode model (``model_type = "gpt_bigcode"``).

    Reconstructed from a mangled original: duplicate ``UpperCAmelCase__``
    parameters were a SyntaxError, every ``self.`` write had been dropped,
    the base class was the undefined name ``__lowercase`` where the imported
    ``PretrainedConfig`` belongs, and ``fpaa`` was a mangling of ``fp32``.
    """

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 133 | 1 |
import requests
from bsa import BeautifulSoup
def snake_case_(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape worldometers and return ``{stat title: value}`` for COVID-19.

    :param url: page to scrape (defaults to the worldometers coronavirus page).
    :return: mapping from heading/panel-title text to its counter value.

    Fix vs. the mangled original: the soup, keys and values were all bound to
    the same throwaway name, so ``soup``/``keys``/``values`` were undefined
    at their point of use.
    """
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    # Fix: the original called the undefined name ``world_covidaa_stats``;
    # the scraper defined above is ``snake_case_``.
    for key, value in snake_case_().items():
        print(f"{key}\n{value}\n")
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    """Bundles a LayoutLMv2 image processor with a LayoutXLM tokenizer.

    The image processor runs (optionally with OCR) first; its words/boxes are
    then fed to the tokenizer, and the pixel values are attached to the
    encoding under the ``"image"`` key.

    Reconstructed from a mangled original: duplicate ``__snake_case``
    parameters were a SyntaxError, and the deprecated ``feature_extractor``
    fallback could hit an unbound local when the kwarg was absent.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text=None,
        text_pair=None,
        boxes=None,
        word_labels=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ):
        """Run the image processor, then the tokenizer, and merge the outputs."""
        # verify input: user-supplied boxes/labels conflict with built-in OCR
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer; when OCR supplied the words, the user
        # text is treated as a question and the OCR words as the pair.
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-module wiring for MobileBERT.
# Fixes vs. the mangled original: the structure dict was bound to
# ``__lowerCAmelCase`` and then *overwritten* by each optional-dependency
# branch instead of being extended per submodule key, ``_LazyModule`` read an
# undefined ``_import_structure``, and the module was never installed into
# ``sys.modules``.
_import_structure = {
    "configuration_mobilebert": [
        "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileBertConfig",
        "MobileBertOnnxConfig",
    ],
    "tokenization_mobilebert": ["MobileBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilebert"] = [
        "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileBertForMaskedLM",
        "MobileBertForMultipleChoice",
        "MobileBertForNextSentencePrediction",
        "MobileBertForPreTraining",
        "MobileBertForQuestionAnswering",
        "MobileBertForSequenceClassification",
        "MobileBertForTokenClassification",
        "MobileBertLayer",
        "MobileBertModel",
        "MobileBertPreTrainedModel",
        "load_tf_weights_in_mobilebert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
        "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileBertForMaskedLM",
        "TFMobileBertForMultipleChoice",
        "TFMobileBertForNextSentencePrediction",
        "TFMobileBertForPreTraining",
        "TFMobileBertForQuestionAnswering",
        "TFMobileBertForSequenceClassification",
        "TFMobileBertForTokenClassification",
        "TFMobileBertMainLayer",
        "TFMobileBertModel",
        "TFMobileBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mobilebert import (
        MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileBertConfig,
        MobileBertOnnxConfig,
    )
    from .tokenization_mobilebert import MobileBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mobilebert_fast import MobileBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilebert import (
            MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
            MobileBertLayer,
            MobileBertModel,
            MobileBertPreTrainedModel,
            load_tf_weights_in_mobilebert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilebert import (
            TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileBertForMaskedLM,
            TFMobileBertForMultipleChoice,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertMainLayer,
            TFMobileBertModel,
            TFMobileBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from PIL import Image
def mean_threshold(image) -> "Image":
    """Binarize a grayscale PIL image in place around its mean intensity.

    Pixels strictly above the mean become 255; the rest become 0. The same
    (mutated) image object is returned.

    :param image: grayscale ("L" mode) PIL image.
    :return: the input image, thresholded in place.

    Fixes vs. the mangled original: ``image``/``pixels``/``mean`` were all
    bound to one throwaway name, the loops ranged over the image object
    itself, and the function is renamed to ``mean_threshold``, the name the
    ``__main__`` guard below calls.
    """
    height, width = image.size
    mean = 0
    pixels = image.load()

    # First pass: accumulate the mean intensity.
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    # Second pass: threshold every pixel against the mean.
    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    # Threshold the grayscale version of the input image and save the result.
    # Fix: the original bound the result to ``__lowerCAmelCase`` while the
    # next line read ``image``.
    image = mean_threshold(Image.open('path_to_image').convert('L'))
    image.save('output_image_path')
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-module wiring for MGP-STR.
# Fixes vs. the mangled original: the structure dict was bound to
# ``UpperCamelCase_`` and then overwritten (not keyed) by the torch branch,
# ``_LazyModule`` read an undefined ``_import_structure``, and the module was
# never installed into ``sys.modules``.
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 709 |
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
UpperCamelCase_ : str = """scheduler_config.json"""
class FlaxKarrasDiffusionSchedulers(Enum):
    """Closed set of Flax schedulers compatible with the Karras sigmas API.

    Fixes vs. the mangled original: every member was assigned to the same
    mangled identifier (so only the last survived) and the base class was
    the undefined name ``__lowerCamelCase`` where the imported ``Enum``
    belongs.
    """

    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
    """Base output class for a Flax scheduler's ``step`` result.

    Fixes vs. the mangled original: the single field had been collapsed to a
    ``42`` placeholder and the base class was the undefined name
    ``__lowerCamelCase`` where the imported ``BaseOutput`` belongs.
    """

    # The denoised/updated sample to feed into the next diffusion step.
    prev_sample: jnp.ndarray
class FlaxSchedulerMixin:
    """Mixin providing save/load and compatibility helpers for Flax schedulers.

    Reconstructed from a mangled original: all four methods shared one name
    (so only the last survived), ``from_pretrained`` had duplicate ``a__``
    parameters (a SyntaxError), and its config/kwargs tuple was unpacked into
    a single repeated name.
    """

    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        """Load config, instantiate the scheduler and create its Flax state."""
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        """Write the scheduler config to ``save_directory``."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Scheduler classes that can be swapped in for this one."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        # Resolve compatible class names against the top-level package.
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x, shape):
    """Broadcast `x` to `shape`, aligning `x`'s existing dimensions on the left.

    Trailing singleton axes are appended to `x` before broadcasting, so e.g.
    a ``(B,)`` array broadcasts against ``(B, C, H, W)``.

    Fixes vs. the obfuscated original: the function was renamed to the name
    its call sites in this module actually use, and the duplicate parameter
    names (a SyntaxError) were replaced with real ones.
    """
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def _lowerCAmelCase (_lowercase , _lowercase=0.999 , _lowercase=jnp.floataa ):
"""simple docstring"""
def alpha_bar(_lowercase ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
a__ = []
for i in range(_lowercase ):
a__ = i / num_diffusion_timesteps
a__ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(_lowercase ) / alpha_bar(_lowercase ) , _lowercase ) )
return jnp.array(_lowercase , dtype=_lowercase )
@flax.struct.dataclass
class lowerCamelCase__:
    """Common diffusion-schedule state (betas, alphas, cumulative alphas)
    shared by the Flax schedulers in this module.

    Fixes vs. the obfuscated original: the three field declarations had been
    collapsed to ``UpperCamelCase__ = 42``; the field names/annotations are
    restored from the keyword construction at the end of the classmethod.
    Local variable names are restored from usage (`betas` was referenced but
    never bound).
    """

    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def lowerCAmelCase_(cls, scheduler):
        """Build the common state from `scheduler.config`.

        Supports trained betas as well as the "linear", "scaled_linear" and
        "squaredcos_cap_v2" schedules; raises NotImplementedError otherwise.
        NOTE(review): upstream names this classmethod `create` — confirm
        before relying on the obfuscated name.
        """
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}')

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod)
def get_sqrt_alpha_prod(state, original_samples, noise, timesteps):
    """Return ``(sqrt(alpha_bar_t), sqrt(1 - alpha_bar_t))`` broadcast to
    `original_samples.shape` for the given `timesteps`.

    `noise` is accepted for call-site symmetry with the noising helpers but
    is not used here.

    Fixes vs. the obfuscated original: renamed to the name used by its call
    sites in this module, and the duplicate parameter names (a SyntaxError)
    restored from body usage.
    """
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(state, original_samples, noise, timesteps):
    """Forward-diffuse `original_samples` to the noise level of `timesteps`:
    ``sqrt(alpha_bar) * x0 + sqrt(1 - alpha_bar) * noise``.

    NOTE(review): the obfuscated source gave all four module helpers the same
    name, making the earlier definitions unreachable; this one is renamed per
    the upstream diffusers convention (`add_noise_common`) — confirm no
    external caller imports the obfuscated name. Duplicate parameter names
    (a SyntaxError) restored from body usage.
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common(state, sample, noise, timesteps):
    """Compute the v-prediction target:
    ``sqrt(alpha_bar) * noise - sqrt(1 - alpha_bar) * sample``.

    NOTE(review): renamed per the upstream diffusers convention
    (`get_velocity_common`) because the obfuscated source gave all four
    module helpers the same name — confirm no external caller imports the
    obfuscated name. Duplicate parameter names (a SyntaxError) restored from
    body usage.
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 394 | 0 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCREAMING_SNAKE_CASE__ : Optional[int] = 'scheduler_config.json'
class lowerCamelCase_ ( __snake_case ):
    """Enumeration of scheduler types, numbered 1-14.

    NOTE(review): the obfuscation collapsed every member name to ``a__``, so
    as written only the final assignment (14) survives as a class attribute.
    The original member names (one per scheduler class) must be restored from
    the upstream source before this enum is usable.
    """

    a__ = 1
    a__ = 2
    a__ = 3
    a__ = 4
    a__ = 5
    a__ = 6
    a__ = 7
    a__ = 8
    a__ = 9
    a__ = 10
    a__ = 11
    a__ = 12
    a__ = 13
    a__ = 14
@dataclass
class lowerCamelCase_ ( __snake_case ):
    """Base output dataclass for scheduler step results.

    NOTE(review): the single field declaration was mangled by obfuscation to
    ``a__ = 42``; upstream this is presumably a tensor-typed field (e.g.
    ``prev_sample``) — confirm against the original source before use.
    """

    a__ = 42
class lowerCamelCase_(__snake_case):
    """
    Base mixin for schedulers: config-driven loading/saving and discovery of
    compatible scheduler classes.

    Fixes vs. the obfuscated original: the four methods all shared one name
    (so only the last was reachable) and `from_pretrained` had duplicate
    parameter names (a SyntaxError). Names are restored from the internal
    references (`self._get_compatibles()`, `cls._compatibles`).
    """

    # Name of the JSON file the scheduler config is (de)serialized to.
    config_name = SCHEDULER_CONFIG_NAME
    # Names of compatible scheduler classes, resolved by _get_compatibles().
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        """Instantiate a scheduler from a pretrained config.

        NOTE(review): `load_config` is called with both unused-kwargs and
        commit-hash returns, hence the 3-way unpack — confirm against the
        upstream signature.
        """
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        """Serialize the scheduler config to `save_directory`."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Scheduler classes that can be swapped in for this one."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        """Resolve `_compatibles` names to classes exposed on the root package."""
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split('.')[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
| 0 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    """Compute ROUGE between the line-aligned files at `pred_path` and
    `tgt_path`, optionally saving the metrics dict as JSON to `save_path`.

    Target lines are truncated to the number of prediction lines; extra
    keyword arguments are forwarded to `calculate_rouge`.

    Fixes vs. the obfuscated original: renamed to the name `fire.Fire`
    actually dispatches below, and the duplicate parameter names (a
    SyntaxError) replaced with real ones.
    """
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        # indent=None keeps the saved JSON compact.
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
| 634 | 0 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
A = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot classification: normalizes the candidate
    labels and expands each sequence into one premise/hypothesis pair per
    label.

    Fixes vs. the obfuscated original: the class is renamed to the name the
    pipeline below already references, the base class is restored from the
    (previously unused) `ArgumentHandler` import, `__call__`'s duplicate
    parameter names (a SyntaxError) are restored from body usage, and the
    error message is formatted with the template rather than an undefined
    placeholder.
    """

    def _parse_labels(self, labels):
        """Accept a list of labels or a comma-separated string; return a list."""
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        """Return ``(sequence_pairs, sequences)`` where each pair is
        ``[sequence, hypothesis_template.format(label)]``."""
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        # A template with no "{}" placeholder would produce identical
        # hypotheses for every label.
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    "The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. "
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template))

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class a__(ChunkPipeline):
    """
    NLI-based zero-shot classification pipeline: each (sequence, candidate
    label) pair is scored as a premise/hypothesis entailment problem and the
    entailment logits are turned into per-label scores.

    Fixes vs. the obfuscated original: the decorator argument and base class
    are restored from the (previously unused) `PIPELINE_INIT_ARGS` /
    `ChunkPipeline` imports; duplicate parameter names (SyntaxErrors) are
    restored from body usage; `label2id` replaces the mangled `labelaid`
    (the adjacent warning text names the real attribute); the eos-token
    fallback now assigns `tokenizer.pad_token` instead of a dead local; and
    the softmax uses `keepdims=True` instead of an undefined name. Pipeline
    hook names (`preprocess`/`_forward`/`postprocess`/`_sanitize_parameters`)
    follow the transformers Pipeline API.
    """

    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.")

    @property
    def entailment_id(self):
        """Index of the model's 'entailment' label, or -1 if not found."""
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        """Tokenize premise/hypothesis pairs, retrying without truncation when
        the input is shorter than the requested truncation length."""
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`")
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=truncation)
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=TruncationStrategy.DO_NOT_TRUNCATE)
            else:
                raise e
        return inputs

    def _sanitize_parameters(self, **kwargs):
        """Split call kwargs into preprocess/forward/postprocess parameter dicts."""
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers.")
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences, *args, **kwargs):
        """Classify `sequences`; a single positional extra is treated as the
        candidate labels for backward compatibility."""
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        """Yield one tokenized model input per candidate label."""
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        """Run the model on one label's tokenized pair, carrying metadata through."""
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        """Aggregate per-label logits into sorted label scores."""
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
| 487 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A = logging.get_logger(__name__)
A = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class a__(PretrainedConfig):
    r"""
    Configuration for the Table Transformer model (a DETR-style detection
    architecture). Stores backbone, transformer, matcher and loss settings.

    Fixes vs. the obfuscated original: the base class is restored from the
    (previously unused) `PretrainedConfig` import; `__init__`'s duplicate
    parameter names (a SyntaxError) are restored from the body's keyword
    usage and the preserved default-value order; class-attribute names follow
    the `PretrainedConfig` contract (`attribute_map` grounds the two
    properties below).
    """

    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        """Alias required by `attribute_map`."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias required by `attribute_map`."""
        return self.d_model
class a__(OnnxConfig):
    """ONNX export configuration for Table Transformer.

    Fixes vs. the obfuscated original: the base class is restored from the
    (previously unused) `OnnxConfig` import, and the property names follow
    the transformers `OnnxConfig` contract (`inputs`, `atol_for_validation`,
    `default_onnx_opset`).
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported graph's inputs."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        """Minimum ONNX opset supporting the operators this model needs."""
        return 12
| 487 | 1 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class A(PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast (tiny-model, CPU-sized) tests for `StableUnCLIPPipeline`.

    Fixes vs. the obfuscated original: the mixin bases are restored from the
    (previously unused) tester-mixin imports; the undefined
    `embedder_hidden_size` / `embedder_projection_dim` locals and the
    `_UpperCAmelCase` argument placeholders are reconstructed.
    NOTE(review): boolean literals (e.g. `clip_sample`, `upcast_attention`,
    `use_linear_projection`, `set_alpha_to_one`) were erased by obfuscation
    and are reconstructed from the upstream test — confirm against it.
    """

    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        """Build a full set of tiny pipeline components with fixed seeds."""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            ))

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            ))

        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            # projection includes both the image embedding and the time embedding
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline call arguments for the fast tests."""
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        # Exact comparison is only stable on CPU.
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class A(unittest.TestCase):
    """Slow GPU integration tests for `StableUnCLIPPipeline`.

    Fixes vs. the obfuscated original: the undefined `torch.floataa` dtype is
    replaced with `torch.float16`, `_UpperCAmelCase` placeholders are
    replaced with `torch_device` / `disable=None`, and the test methods are
    renamed with the `test_` prefix so unittest actually discovers them.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy")

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 15 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__magic_name__ : Optional[int] = False
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Placeholder for the fast (dummy-model) test suite of this pipeline;
    intentionally empty — only the slow integration tests below are defined."""

    pass
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow GPU integration test for `VersatileDiffusionImageVariationPipeline`.

    Fixes vs. the obfuscated original: the undefined `lowerCamelCase`
    placeholders are replaced with `torch_device` / the actual local values,
    the locals (all previously named `_snake_case`) are given real names, and
    the method is renamed with the `test_` prefix so unittest discovers it.
    """

    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        # Loose tolerance: generation is seeded but hardware-dependent.
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 672 | 0 |
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def a__(coefficient_matrix: np.ndarray, constant_matrix: np.ndarray, init_val: list, iterations: int) -> list:
    """Solve ``A x = b`` by the Jacobi iteration method.

    Args:
        coefficient_matrix: square (n x n), strictly diagonally dominant matrix A.
        constant_matrix: column vector b of shape (n x 1).
        init_val: initial guess for x, length n.
        iterations: number of Jacobi sweeps to perform (must be >= 1).

    Returns:
        The approximate solution as a list of floats.

    Raises:
        ValueError: on any dimension mismatch, non-positive iteration count,
            or a coefficient matrix that is not strictly diagonally dominant.

    Fixes vs. the obfuscated original: the duplicate parameter names (a
    SyntaxError) are restored from the body's usage
    (`coefficient_matrix.shape`, `init_val`, `iterations`).
    """
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    # Augmented matrix [A | b]; the last column is the constant term.
    table = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: np.ndarray) -> bool:
    """Raise ValueError unless every diagonal entry of the augmented matrix
    strictly exceeds the sum of the other coefficients in its row (the last
    column — the constant term — is excluded). Returns True on success.
    """
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 720 |
import functools
def a__(word1: str, word2: str) -> int:
    """Return the Levenshtein (edit) distance between `word1` and `word2`.

    Top-down dynamic programming over index pairs, memoized with
    `functools.cache`; each step considers insert, delete, or
    replace/match of one character.

    Fixes vs. the obfuscated original: the duplicate parameter names (a
    SyntaxError) and the collapsed index/length locals are restored so the
    two words are actually compared character by character.
    """
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 131 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=32, lowerCAmelCase=2, lowerCAmelCase=3, lowerCAmelCase=16, lowerCAmelCase=[1, 2, 1], lowerCAmelCase=[2, 2, 4], lowerCAmelCase=2, lowerCAmelCase=2.0, lowerCAmelCase=True, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=0.1, lowerCAmelCase="gelu", lowerCAmelCase=False, lowerCAmelCase=True, lowerCAmelCase=0.0_2, lowerCAmelCase=1e-5, lowerCAmelCase=True, lowerCAmelCase=None, lowerCAmelCase=True, lowerCAmelCase=10, lowerCAmelCase=8, ):
"""simple docstring"""
lowerCamelCase_ =parent
lowerCamelCase_ =batch_size
lowerCamelCase_ =image_size
lowerCamelCase_ =patch_size
lowerCamelCase_ =num_channels
lowerCamelCase_ =embed_dim
lowerCamelCase_ =depths
lowerCamelCase_ =num_heads
lowerCamelCase_ =window_size
lowerCamelCase_ =mlp_ratio
lowerCamelCase_ =qkv_bias
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =drop_path_rate
lowerCamelCase_ =hidden_act
lowerCamelCase_ =use_absolute_embeddings
lowerCamelCase_ =patch_norm
lowerCamelCase_ =layer_norm_eps
lowerCamelCase_ =initializer_range
lowerCamelCase_ =is_training
lowerCamelCase_ =scope
lowerCamelCase_ =use_labels
lowerCamelCase_ =type_sequence_label_size
lowerCamelCase_ =encoder_stride
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ =None
if self.use_labels:
lowerCamelCase_ =ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase_ =self.get_config()
return config, pixel_values, labels
def lowercase__ ( self ):
"""simple docstring"""
return SwinvaConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
def create_and_check_model(self, config, pixel_values, labels):
    """Run the base Swinv2 model and check the last hidden state shape.

    The original declared three identically-named parameters (a SyntaxError)
    and referenced the undefined ``__a``; names restored from the call site
    ``create_and_check_model(*config_and_inputs)``.
    """
    model = SwinvaModel(config=config)
    model.to(torch_device)  # NOTE(review): torch_device assumed imported at file top — confirm
    model.eval()
    result = model(pixel_values)

    # After len(depths)-1 merge stages each 2x2 patch group is merged,
    # so the sequence shrinks by 4 per stage and the width doubles per stage.
    expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
    expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
    """Run SwinvaForMaskedImageModeling and check reconstruction logits shapes.

    Also exercises the single-channel (greyscale) path. Parameter and local
    names restored from the mangled originals.
    """
    model = SwinvaForMaskedImageModeling(config=config)
    model.to(torch_device)
    model.eval()
    result = model(pixel_values)
    self.parent.assertEqual(
        result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
    )

    # test greyscale images
    config.num_channels = 1
    model = SwinvaForMaskedImageModeling(config)
    model.to(torch_device)
    model.eval()

    pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
    result = model(pixel_values)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))
def create_and_check_for_image_classification(self, config, pixel_values, labels):
    """Run SwinvaForImageClassification with labels and check the logits shape."""
    config.num_labels = self.type_sequence_label_size
    model = SwinvaForImageClassification(config)
    model.to(torch_device)
    model.eval()
    result = model(pixel_values, labels=labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def prepare_config_and_inputs_for_common(self):
    """Return ``(config, inputs_dict)`` as expected by the common test mixin.

    The original bound the tuple to a placeholder and then unpacked an
    undefined name; the standard two-step unpack is restored.
    """
    config_and_inputs = self.prepare_config_and_inputs()
    config, pixel_values, labels = config_and_inputs
    inputs_dict = {"pixel_values": pixel_values}
    return config, inputs_dict
@require_torch
class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model-level tests for Swinv2.

    Every method in the original was named ``lowercase__`` so each definition
    silently shadowed the previous one and only the last survived; standard
    test-method names are restored. ``check_hidden_states_output`` also
    declared four identically-named parameters (a SyntaxError).

    NOTE(review): the base mixins and the class name were placeholders in the
    original; ``ModelTesterMixin``/``PipelineTesterMixin`` are assumed from
    the transformers test utilities — confirm against the file's imports.
    """

    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    # NOTE(review): the four boolean flags were anonymous placeholders; these
    # are the flags the common mixin reads in the upstream Swinv2 test — confirm.
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        """Shared shape checks for hidden_states and reshaped_hidden_states."""
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)

        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the published swinv2-tiny checkpoint.

    Method names restored (both originals were named ``lowercase__``, so the
    property shadowed nothing but the test name was lost).
    """

    @cached_property
    def default_image_processor(self):
        # Returns None when vision deps are absent so the test can skip gracefully.
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 676 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
lowercase_: int = logging.get_logger(__name__)
lowercase_: Optional[int] = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class BloomConfig(PretrainedConfig):
    """Configuration for BLOOM models.

    Renamed from the placeholder ``lowercase__`` (which collided with the ONNX
    config class defined below, making this class unreachable) and rebased on
    the imported ``PretrainedConfig``; ``self.`` assignments are restored —
    the original bound every value to a throwaway local, leaving the config
    empty.
    """

    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    """ONNX export configuration for BLOOM.

    Renamed from the placeholder ``lowercase__`` (which shadowed the config
    class above); the five methods were all named ``lowercase`` so only the
    last survived — the real names (``inputs``, ``num_layers``,
    ``num_attention_heads``, ``atol_for_validation``,
    ``generate_dummy_inputs``, ``default_onnx_opset``) are restored because
    the ONNX export machinery looks them up by name.
    """

    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see:
            # https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)],
                dim=1,
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 648 | 0 |
"""simple docstring"""
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    """Well-formed two-column CSV file; returns its path as a string."""
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    """CSV whose second data row has an extra trailing field — makes pandas raise."""
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    """CSV with a single 'image' column pointing at the shared image fixture."""
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    """CSV with a single string 'label' column (values good/bad)."""
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    """CSV whose 'int_list' column holds space-separated integers per row."""
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    """A malformed CSV must raise and log which file failed to parse.

    The original passed an undefined placeholder as the expected exception
    type; ``ValueError`` (what pandas' 'Error tokenizing data' surfaces as)
    is restored.
    """
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    """An 'image' feature column is decoded to {path, bytes} dicts."""
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    # Image()() yields the feature's pyarrow storage type.
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    """A 'label' ClassLabel column is converted from strings to class ids.

    Fixes the garbled ``.straint`` call: the real ClassLabel API is
    ``str2int``.
    """
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    """A converter can split a space-separated column into lists of ints.

    The original lambda referenced an undefined loop variable; the standard
    ``lambda x: [int(i) for i in x.split()]`` is restored.
    """
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for LDMTextToImagePipeline with tiny components.

    The three pipeline test classes in this file all carried the same
    placeholder name ``snake_case``; distinct names are restored. The base
    class placeholder is assumed to be the imported ``PipelineTesterMixin``.
    """

    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # NOTE(review): the original flag name was a placeholder; assumed to be
    # test_cpu_offload per the upstream test — confirm.
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build the tiny UNet/VAE/CLIP components the pipeline needs."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64),
            in_channels=3,
            out_channels=3,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    """Slow GPU test against the published ldm-text2im-large-256 checkpoint."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        # Fixed latents so the expected slice below is reproducible.
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    """Nightly full-step GPU test comparing against a stored reference image."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure for the RAG sub-package. The original reassigned one
# placeholder name three times (discarding the model lists) and never
# installed the _LazyModule, so lazy importing was broken.
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]

if TYPE_CHECKING:
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )

else:
    import sys

    # Replace this module with a lazy proxy so the heavy sub-modules are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 579 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# The original assigned all four of these to one placeholder name, each
# overwriting the last; the tokenizer class below reads them by these names.
VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    """SentencePiece character tokenizer for SpeechT5.

    Renamed from the placeholder ``_lowerCamelCase`` and rebased on the
    imported ``PreTrainedTokenizer``. Every method was named ``snake_case__``
    in the original, so each definition shadowed the previous one; the real
    tokenizer-API names are restored (the base class dispatches to them).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Join sentencepiece tokens back into text, keeping special tokens verbatim."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Append EOS; pairs are concatenated (kept only for API consistency)."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        suffix_ones = [1]  # the appended EOS
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 365 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    # Extract a subset of teacher layers into a student-sized state dict for
    # transfer-learned distillation (DistilBERT-style layer selection).
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPTaLMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    # Every other teacher layer (0, 2, 4, 7, 9, 11) is remapped to a contiguous
    # student index std_idx.
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
| 715 |
'''simple docstring'''
import math
def prime_sieve(n):
    """Return all primes strictly below `n` using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit=99_99_66_66_33_33):
    """Project Euler 234: sum of semidivisible numbers not exceeding `limit`.

    A number n is semidivisible when exactly one of lps(n), ups(n) divides it,
    where lps/ups are the largest prime <= sqrt(n) and smallest prime > sqrt(n).
    For each consecutive prime pair (p, q), n in (p^2, q^2] has lps=p, ups=q, so
    we sum multiples of p and of q in that window and subtract multiples of both
    twice (they were added by both passes and are not semidivisible).
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
| 511 | 0 |
'''simple docstring'''
from math import loga
def _A ( lowercase__ ):
if a < 0:
raise ValueError("""Input value must be a positive integer""" )
elif isinstance(lowercase__ , lowercase__ ):
raise TypeError("""Input value must be a 'int' type""" )
return 0 if (a == 0) else int(loga(a & -a ) )
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 325 |
'''simple docstring'''
from typing import Any
class Node:
    """A single node of a singly linked list, holding arbitrary data."""

    def __init__(self, data: Any) -> None:
        self.data = data
        # link to the next node; None marks the end of the list
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"
class LinkedList:
    """A singly linked list supporting indexing, insertion, deletion and reversal."""

    def __init__(self) -> None:
        self.head = None

    def __iter__(self) -> Any:
        """Yield each node's data from head to tail."""
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        """Return the data stored at `index` (0-based)."""
        if not 0 <= index < len(self):
            raise ValueError("""list index out of range.""")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        """Overwrite the data stored at `index`."""
        if not 0 <= index < len(self):
            raise ValueError("""list index out of range.""")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert `data` before position `index` (0..len inclusive)."""
        if not 0 <= index <= len(self):
            raise IndexError("""list index out of range""")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove the node at `index` and return its data."""
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("""List index out of range.""")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        """Reverse the list in place."""
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    """Self-test: exercise insert/delete/index/reverse with integer data."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """Self-test: exercise the linked list with heterogeneous data types."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    """Interactive command-line demo for the linked list; runs the doctests first."""
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    # demonstrate __setitem__
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
# Interactive demo; exercised only when this module is run directly.
if __name__ == "__main__":
    main()
| 325 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    # Slow tokenizer is unavailable without sentencepiece.
    PegasusTokenizer = None


logger = logging.get_logger(__name__)

# Sentencepiece's word-boundary marker character.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 5_1_2,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    """Fast PEGASUS tokenizer backed by HuggingFace *tokenizers*.

    Adds the PEGASUS special-token scheme: one sentence mask token, one word
    mask token, and <unk_2>..<unk_{offset-1}> fillers so that real vocabulary
    ids start at `offset`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )

            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        # Without the original spiece.model we cannot rebuild a slow tokenizer.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        """Return a 0/1 mask marking special-token positions in `seq`."""
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Get a list where entries are [1] if a token is special and [0] otherwise."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs from a sequence (or pair) by appending the EOS token id."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece vocab file into `save_directory`."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 711 |
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    """A binary tree node holding integer data and optional left/right children."""

    def __init__(self, data):
        self.data = data
        self.right = None
        self.left = None
def build_tree() -> TreeNode:
    """Interactively build a binary tree breadth-first from user input.

    Entering "n" (or nothing) at any prompt stops and returns the root.
    """
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise  # unreachable: the loop always returns once the user enters "n"
def pre_order(node: TreeNode) -> None:
    """Print the tree pre-order (root, left, right), comma-separated."""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)
def in_order(node: TreeNode) -> None:
    """Print the tree in-order (left, root, right), comma-separated."""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)
def post_order(node: TreeNode) -> None:
    """Print the tree post-order (left, right, root), comma-separated."""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")
def level_order(node: TreeNode) -> None:
    """Print the tree level by level (BFS) on a single line, comma-separated."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)
def level_order_actual(node: TreeNode) -> None:
    """Print the tree level by level, with a newline after each level."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        # drain the current level, collecting the next level's nodes
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")
def prompt(s: str = "", width=50, char="*") -> str:
    """Return `s` centered in a banner of `width` chars; a bare rule when `s` is empty."""
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
    # Demo: build a tree interactively, then run every traversal on it.
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node: TreeNode = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
| 168 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer-suite tests for the GPT-2 slow and fast tokenizers."""

    tokenizer_class = GPTaTokenizer
    rust_tokenizer_class = GPTaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTaTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = GPTaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPTaTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPTaTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token by default; covered elsewhere
        pass

    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0,
                    sequence_1,
                    add_special_tokens=True,
                    return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    """Regression tests for OPT's GPT-2-style tokenizer (slow/fast round-trips)."""

    def test_serialize_deserialize_fast_opt(self):
        # More context for this test case: https://github.com/huggingface/transformers/issues/17809
        # The tokenizer should be able to be serialized and deserialized without loss
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"

        input_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(input_ids, [2, 250, 1_345, 9, 10, 4_758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        input_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(input_ids, [2, 250, 1_345, 9, 10, 4_758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"

        input_ids = tokenizer.encode(
            text,
        )
        # Same as above
        self.assertEqual(input_ids, [2, 250, 1_345, 9, 10, 4_758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]

        text = "A photo of a cat"
        input_ids = tokenizer.encode(
            text,
        )
        # We changed the bos token
        self.assertEqual(input_ids, [31_957, 250, 1_345, 9, 10, 4_758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        input_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(input_ids, [31_957, 250, 1_345, 9, 10, 4_758])
| 69 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)  # referenced as `logger` inside the config class below

# Map of pretrained DETA checkpoints to their hosted config files.
# NOTE(review): original constant name not visible; this follows the transformers convention.
DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class DetaConfig(PretrainedConfig):
    r"""
    Configuration class storing the hyper-parameters of a DETA model.

    Instantiating with the defaults yields a configuration close to the ``ut/deta``
    architecture. When ``backbone_config`` is ``None`` a default ResNet backbone
    config is created; a ``dict`` backbone config is converted via ``CONFIG_MAPPING``.
    """

    model_type = "deta"
    # Map the common `PretrainedConfig` attribute names onto DETA's field names.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=9_00,
        max_position_embeddings=20_48,
        encoder_layers=6,
        encoder_ffn_dim=20_48,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=10_24,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=2_56,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.0_2,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=3_00,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.2_5,
        **kwargs,
    ) -> None:
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        # NOTE(review): `return_intermediate` is accepted but not stored, matching
        # the original body — confirm whether it should be persisted.
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        """Alias required by `attribute_map`."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias required by `attribute_map`."""
        return self.d_model

    def to_dict(self):
        """Serialize this config (including the nested backbone config) to a dict."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
"""simple docstring"""
def simplify(current_set: list[list]) -> list[list]:
    """Recursively reduce an augmented matrix toward an upper-triangular form.

    Each row is first normalized by its leading coefficient, then subtracted from
    the first row to cancel the leading term; the remaining sub-matrix is handled
    recursively. Rows are mutated in place (coefficients become floats).

    >>> simplify([[1, 2, 3], [4, 5, 6]])
    [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]
    """
    # Divide each row by the magnitude of its first term --> creates 'unit' rows.
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel the leading term of every other row.
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If the first term is 0, the row is already in the form we want.
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create the next recursion iteration set from everything but the first column.
    if len(final_set[0]) != 3:
        leading_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, leading_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """Solve n simultaneous linear equations given as n rows of n+1 numbers.

    Each row is ``[a1, a2, ..., an, b]`` for ``a1*x1 + ... + an*xn = b``.
    Returns the solutions ``[x1, ..., xn]`` rounded to 5 decimal places.

    Raises:
        IndexError: if the input is empty or rows do not have length n+1.
        ValueError: if any coefficient is not an int/float, or no row is free of
            zeros when one is needed as the pivot.

    >>> solve_simultaneous([[1, 2, 3], [4, 5, 6]])
    [-1.0, 2.0]
    >>> solve_simultaneous([[4, 2]])
    [0.5]
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        # Move a zero-free row to the front so it can serve as the pivot row.
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    # Back-substitute from the last (shortest) row upward.
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
| 708 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    """Unit tests for `is_safetensors_compatible`.

    A file list is compatible when every `.bin` weight file has a safetensors
    counterpart (optionally for a given `variant`). The methods need distinct
    names — in the original source they all shadowed each other — and each one
    must pass its local `filenames` list to the function under test.
    """

    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
| 31 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests for `StableDiffusionKDiffusionPipeline`.

    The original source defined every method under the same mangled name, so
    only the last survived and `tearDown` (which calls `super().tearDown()`)
    was never invoked; undefined `__lowerCAmelCase` placeholders are restored
    to `torch_device`, `disable=None`, the local `generator`, and `True`.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        # Loose tolerance, matching the original check.
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 221 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
# Module-level constants. The obfuscated source bound every one of these to the
# same throwaway name while later code references `logger`, `SESSION_ID`, etc.
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuida().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Format a user-agent string with basic info about the running environment.

    Args:
        user_agent: extra fields to append — a dict of ``key/value`` pairs or a
            preformatted string. The original code passed the argument to
            ``isinstance`` twice (``isinstance(x, x)``); it must be checked
            against ``dict`` and ``str``.

    Returns:
        The assembled user-agent string; telemetry-related fields are replaced
        by ``telemetry/off`` when telemetry is disabled or the hub is offline.
    """
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    """Return the fully-qualified Hub repo name ``namespace/model_id``.

    When no ``organization`` is given the namespace is the username resolved
    from ``token`` (falling back to the locally stored HfFolder token).
    """
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    """Render and save a model card (`README.md`) for a training run.

    Args:
        args: training-argument namespace; attributes are read defensively with
            ``hasattr`` since different training scripts expose different fields.
        model_name: name of the model the card describes.

    Raises:
        ValueError: if Jinja is not installed (the card is template-based).
    """
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    # Only the main process writes the card in distributed runs.
    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        # The original source passed `adam_betaa=` twice (a SyntaxError); the two
        # keywords are the distinct Adam betas.
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """Extract the commit hash from the path of a resolved cache file.

    Returns ``commit_hash`` unchanged when it is already known or no file path
    is given; otherwise parses the ``snapshots/<hash>/`` segment of the path and
    validates it against ``REGEX_COMMIT_HASH``. Returns ``None`` when no valid
    hash can be found.
    """
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
# (`old_diffusers_cache` is referenced by `move_cache` below, so it must be bound
# under that name.)
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    """Move cached blob files from the pre-v0.14 cache layout to the new one.

    Every regular (non-symlink) file under ``<old>/**/blobs/`` is moved to the
    same relative location under the new cache dir, and a symlink is left behind
    so older diffusers versions can still find the file. Defaults to
    ``old_diffusers_cache`` -> ``DIFFUSERS_CACHE``.
    """
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                # e.g. filesystems/platforms without symlink support — the move
                # still succeeded, only the backward-compat link is missing.
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
# One-time cache migration, executed at import. The obfuscated source bound the
# file path and version to throwaway names while reading `cache_version_file`
# and `cache_version` below.
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
                "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
                "message and we will do our best to help."
            )

if cache_version < 1:
    # Record that migration ran so it is not attempted again.
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
            "the directory exists and can be written to."
        )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: Optional[str] = None ) -> str:
'''simple docstring'''
if variant is not None:
A__ = weights_name.split("." )
A__ = splits[:-1] + [variant] + splits[-1:]
A__ = ".".join(SCREAMING_SNAKE_CASE_ )
return weights_name
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[int] , *,
SCREAMING_SNAKE_CASE_: Union[str, Any] , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Union[str, Any] , SCREAMING_SNAKE_CASE_: Dict , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: Tuple , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Tuple , SCREAMING_SNAKE_CASE_: Optional[int]=None , ) -> Optional[int]:
'''simple docstring'''
A__ = str(SCREAMING_SNAKE_CASE_ )
if os.path.isfile(SCREAMING_SNAKE_CASE_ ):
return pretrained_model_name_or_path
elif os.path.isdir(SCREAMING_SNAKE_CASE_ ):
if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ):
# Load from a PyTorch checkpoint
A__ = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ):
A__ = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return model_file
else:
raise EnvironmentError(
F'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(SCREAMING_SNAKE_CASE_ ).base_version ) >= version.parse("0.20.0" )
):
try:
A__ = hf_hub_download(
SCREAMING_SNAKE_CASE_ , filename=_add_variant(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , cache_dir=SCREAMING_SNAKE_CASE_ , force_download=SCREAMING_SNAKE_CASE_ , proxies=SCREAMING_SNAKE_CASE_ , resume_download=SCREAMING_SNAKE_CASE_ , local_files_only=SCREAMING_SNAKE_CASE_ , use_auth_token=SCREAMING_SNAKE_CASE_ , user_agent=SCREAMING_SNAKE_CASE_ , subfolder=SCREAMING_SNAKE_CASE_ , revision=revision or commit_hash , )
warnings.warn(
F'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , SCREAMING_SNAKE_CASE_ , )
return model_file
except: # noqa: E722
warnings.warn(
F'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}\' so that the correct variant file can be added.' , SCREAMING_SNAKE_CASE_ , )
try:
# 2. Load model file as usual
A__ = hf_hub_download(
SCREAMING_SNAKE_CASE_ , filename=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , force_download=SCREAMING_SNAKE_CASE_ , proxies=SCREAMING_SNAKE_CASE_ , resume_download=SCREAMING_SNAKE_CASE_ , local_files_only=SCREAMING_SNAKE_CASE_ , use_auth_token=SCREAMING_SNAKE_CASE_ , user_agent=SCREAMING_SNAKE_CASE_ , subfolder=SCREAMING_SNAKE_CASE_ , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
"login`." )
except RevisionNotFoundError:
raise EnvironmentError(
F'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '
"this model name. Check the model page at "
F'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' )
except EntryNotFoundError:
raise EnvironmentError(
F'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' )
except HTTPError as err:
raise EnvironmentError(
F'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' )
except ValueError:
raise EnvironmentError(
F'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'
F' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'
F' directory containing a file named {weights_name} or'
" \nCheckout your internet connection or see how to run the library in"
" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." )
except EnvironmentError:
raise EnvironmentError(
F'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
F'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '
F'containing a file named {weights_name}' )
| 514 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure for the ConvNext model package. Each optional backend
# adds its submodules to `_import_structure`; the obfuscated source overwrote a
# throwaway variable instead, leaving `_import_structure` undefined at the
# `_LazyModule` call below.
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    # Install the lazy module in place of this package so attributes are
    # imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 137 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    """Distribution of ``loc + scale * X`` for a base distribution of ``X``.

    ``scale`` defaults to 1.0 and ``loc`` to 0.0 (identity transform). The mean,
    variance and standard deviation are derived in closed form from the base
    distribution.
    """

    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the affine-transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the affine-transformed distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation of the affine-transformed distribution."""
        return self.variance.sqrt()
class a ( nn.Module ):
    """Projects a feature vector onto the raw parameters of a distribution.

    One ``nn.Linear`` head is created per entry of ``args_dim``; ``domain_map``
    then maps the unconstrained projections into each parameter's valid domain.
    """

    def __init__( self , in_features : int , args_dim : Dict[str, int] , domain_map : Callable[..., Tuple[torch.Tensor]] , **kwargs ) -> None:
        # Fix: the original's parameters all shared one obfuscated name (a
        # SyntaxError) and the attributes read by UpperCamelCase below
        # (self.proj, self.domain_map) were assigned to locals instead.
        super().__init__(**kwargs )
        self.args_dim = args_dim
        # One linear head per distribution parameter.
        self.proj = nn.ModuleList([nn.Linear(in_features , dim ) for dim in args_dim.values()] )
        self.domain_map = domain_map

    def UpperCamelCase ( self , x : torch.Tensor ) -> Tuple[torch.Tensor]:
        # Project, then constrain each raw parameter to its domain.
        params_unbounded = [proj(x ) for proj in self.proj]
        return self.domain_map(*params_unbounded )
class a ( nn.Module ):
    """Wraps an arbitrary callable as an ``nn.Module``.

    Lets a plain function (e.g. a distribution's ``domain_map``) live inside
    module containers.

    NOTE(review): the forward method carries the obfuscated name
    ``UpperCamelCase`` rather than ``forward``, so calling the instance
    directly will not dispatch to it — confirm intended name upstream.
    """

    def __init__( self , function ) -> None:
        # Fix: the original stored the callable in a throwaway local although
        # UpperCamelCase below reads ``self.function``.
        super().__init__()
        self.function = function

    def UpperCamelCase ( self , x , *args ):
        # Fix: the original's positional and star parameter shared one
        # obfuscated name (a SyntaxError). Simply delegate to the callable.
        return self.function(x , *args )
class a :
    """Base class mapping raw network outputs to a torch ``Distribution``.

    Subclasses supply ``distribution_class`` and per-parameter widths
    ``args_dim``, plus a ``domain_map`` constraining raw projections.

    NOTE(review): every method below shares the obfuscated name
    ``UpperCamelCase`` — later defs shadow earlier ones, so only the final
    ``squareplus`` staticmethod is reachable under that name. The originals
    were presumably distinct (_base_distribution, distribution, event_shape,
    event_dim, value_in_support, get_parameter_projection, domain_map,
    squareplus); confirm against upstream before relying on this class.
    """

    SCREAMING_SNAKE_CASE : type            # intended: distribution_class (annotation only)
    SCREAMING_SNAKE_CASE : int             # intended: in_features
    SCREAMING_SNAKE_CASE : Dict[str, int]  # intended: args_dim

    def __init__( self , dim : int = 1 ) -> None:
        # Fix: the original assigned both values to locals although later
        # methods read self.dim and self.args_dim.
        # Number of stacked independent copies of the base distribution.
        self.dim = dim
        # Widen each parameter by ``dim``; reads the subclass-provided mapping.
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def UpperCamelCase ( self , distr_args ):
        # intended name: _base_distribution
        if self.dim == 1:
            return self.distribution_class(*distr_args )
        else:
            # dim > 1: fold the trailing dimension into the event shape.
            return Independent(self.distribution_class(*distr_args ) , 1 )

    def UpperCamelCase ( self , distr_args , loc : Optional[torch.Tensor] = None , scale : Optional[torch.Tensor] = None , ) -> Distribution:
        # intended name: distribution. Fix: the original's three parameters
        # shared one obfuscated name (a SyntaxError) while the body read
        # distr_args / loc / scale.
        distr = self._base_distribution(distr_args )
        if loc is None and scale is None:
            return distr
        else:
            # NOTE(review): ``AffineTransformed``, ``self._base_distribution``
            # and ``self.event_dim`` are not defined under these names in this
            # fragment (obfuscation renamed them) — confirm upstream.
            return AffineTransformed(distr , loc=loc , scale=scale , event_dim=self.event_dim )

    @property
    def UpperCamelCase ( self ) -> Tuple:
        # intended: event_shape — () for a scalar output distribution.
        return () if self.dim == 1 else (self.dim,)

    @property
    def UpperCamelCase ( self ) -> int:
        # intended: event_dim
        return len(self.event_shape )

    @property
    def UpperCamelCase ( self ) -> float:
        # intended: value_in_support — smallest value with finite log-prob.
        return 0.0

    def UpperCamelCase ( self , in_features : int ) -> nn.Module:
        # intended: get_parameter_projection — linear heads for raw params.
        return ParameterProjection(
            in_features=in_features , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )

    def UpperCamelCase ( self , *args : torch.Tensor ):
        # intended: domain_map — subclasses must constrain raw params here.
        raise NotImplementedError()

    @staticmethod
    def UpperCamelCase ( x : torch.Tensor ) -> torch.Tensor:
        # intended: squareplus — smooth map onto the positive half-line,
        # (x + sqrt(x^2 + 4)) / 2; a numerically friendlier softplus variant.
        return (x + torch.sqrt(torch.square(x ) + 4.0 )) / 2.0
class a ( __snake_case ):
    """Student's t output head: parameters ``df``, ``loc``, ``scale``.

    NOTE(review): the two class attributes share one obfuscated name, so the
    second assignment overwrites the first; upstream these are the distinct
    ``args_dim`` / ``distribution_class`` attributes. The base name
    ``__snake_case`` is likewise undefined here — confirm against upstream.
    """

    SCREAMING_SNAKE_CASE : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    SCREAMING_SNAKE_CASE : type = StudentT

    @classmethod
    def UpperCamelCase ( cls , df : torch.Tensor , loc : torch.Tensor , scale : torch.Tensor ):
        """Map unconstrained projections into StudentT's valid domain."""
        # Fix: the original's three parameters shared one obfuscated name (a
        # SyntaxError) while the return statement read df / loc / scale.
        # scale > 0 (clamped away from zero); df > 2 so the variance exists.
        # NOTE(review): ``cls.squareplus`` is not defined under that name in
        # this fragment (the base's methods are obfuscated) — confirm upstream.
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        df = 2.0 + cls.squareplus(df )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class a ( __snake_case ):
    """Normal output head: parameters ``loc`` and ``scale``.

    NOTE(review): the two class attributes share one obfuscated name (second
    overwrites first); upstream these are ``args_dim`` / ``distribution_class``.
    The base name ``__snake_case`` is undefined here — confirm upstream.
    """

    SCREAMING_SNAKE_CASE : Dict[str, int] = {"loc": 1, "scale": 1}
    SCREAMING_SNAKE_CASE : type = Normal

    @classmethod
    def UpperCamelCase ( cls , loc : torch.Tensor , scale : torch.Tensor ):
        """Map unconstrained projections into the Normal's valid domain."""
        # Fix: the original's two parameters shared one obfuscated name (a
        # SyntaxError) while the return read loc / scale. scale must be > 0.
        # NOTE(review): ``cls.squareplus`` is not defined under that name in
        # this fragment — confirm upstream.
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class a ( __snake_case ):
    """NegativeBinomial output head: ``total_count`` (> 0) and ``logits``.

    NOTE(review): the two class attributes share one obfuscated name (second
    overwrites first); upstream these are ``args_dim`` / ``distribution_class``.
    The two instance methods below also share the obfuscated name
    ``UpperCamelCase`` (later shadows earlier). Base ``__snake_case`` is
    undefined here — confirm all names upstream.
    """

    SCREAMING_SNAKE_CASE : Dict[str, int] = {"total_count": 1, "logits": 1}
    SCREAMING_SNAKE_CASE : type = NegativeBinomial

    @classmethod
    def UpperCamelCase ( cls , total_count : torch.Tensor , logits : torch.Tensor ):
        # Fix: the original's parameters shared one obfuscated name (a
        # SyntaxError) while the return read total_count / logits.
        # total_count must be positive; logits are unconstrained.
        # NOTE(review): ``cls.squareplus`` undefined under that name here.
        total_count = cls.squareplus(total_count )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )

    def UpperCamelCase ( self , distr_args ) -> Distribution:
        # intended name: _base_distribution
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count , logits=logits )
        else:
            return Independent(self.distribution_class(total_count=total_count , logits=logits ) , 1 )

    def UpperCamelCase ( self , distr_args , loc : Optional[torch.Tensor] = None , scale : Optional[torch.Tensor] = None ) -> Distribution:
        # intended name: distribution. Fix: parameters shared one obfuscated
        # name (SyntaxError); the body read distr_args / scale.
        # NegativeBinomial cannot be location-shifted, so only ``scale`` is
        # folded into the logits.
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
| 137 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-module wiring for the Falcon model (transformers-style __init__).
# NOTE(review): the import-structure dict is bound to the obfuscated name
# ``__snake_case`` while _LazyModule below receives ``_import_structure`` —
# those were presumably the same name upstream; confirm.
__snake_case :Union[str, Any] ={
    'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
# Register torch-only symbols when PyTorch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __snake_case :Optional[int] =[
        'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FalconForCausalLM',
        'FalconModel',
        'FalconPreTrainedModel',
        'FalconForSequenceClassification',
        'FalconForTokenClassification',
        'FalconForQuestionAnswering',
    ]
if TYPE_CHECKING:
    # Static type checkers see the real symbols.
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )
else:
    import sys
    # Fix: removed dataset-extraction residue ("| 106 |") fused onto the end of
    # this line, which made the module a SyntaxError.
    __snake_case :List[str] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from typing import List
import numpy as np
def lowerCamelCase_ ( gen_kwargs : dict ) -> int:
    """Return the number of shards implied by list-valued entries of ``gen_kwargs``.

    Every list value is treated as a data source to parallelize over; all such
    lists must have the same length (the shard count). Mixed lengths are
    ambiguous and raise ``RuntimeError``. Always returns at least 1.

    Fix: the original's parameter was obfuscated while the body referenced
    ``gen_kwargs``/``value``, and both ``len``/``isinstance`` targets and the
    final ``max`` argument were garbled to the wrong names.
    """
    # Only list values are considered shardable data sources.
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values() ) ) > 1:
        raise RuntimeError(
            (
                'Sharding is ambiguous for this dataset: '
                + 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'
                + '\n'.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
                + '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '
                + 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'
            ) )
    max_length = max(lists_lengths.values() , default=0 )
    # No lists at all still means one (unsharded) job.
    return max(1 , max_length )
def lowerCamelCase_ ( num_shards : int , max_num_jobs : int ) -> List[range]:
    """Split ``num_shards`` shard indices into at most ``max_num_jobs`` contiguous groups.

    Earlier groups absorb the remainder (one extra shard each), and empty
    groups are dropped, so the result has ``min(num_shards, max_num_jobs)``
    ranges covering ``0..num_shards-1`` exactly once.

    Fix: the original's two parameters shared one obfuscated name (a
    SyntaxError); the body referenced ``num_shards``/``max_num_jobs``.
    """
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs ):
        # Base share plus one extra while distributing the remainder.
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        # Continue where the previous group's range stopped.
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shards_indices_per_group.append(range(start , start + num_shards_to_add ) )
    return shards_indices_per_group
def lowerCamelCase_ ( gen_kwargs : dict , max_num_jobs : int ) -> List[dict]:
    """Split ``gen_kwargs`` into per-job kwargs by sharding its list values.

    Non-list values are copied as-is into every job's kwargs; list values are
    sliced according to the shard distribution.

    Fix: the original's two parameters shared one obfuscated name (a
    SyntaxError); the body referenced ``gen_kwargs`` and the keyword call
    below names ``max_num_jobs``.

    NOTE(review): ``_number_of_shards_in_gen_kwargs`` and ``_distribute_shards``
    are not defined under those names in this fragment (the sibling helpers
    are obfuscated to ``lowerCamelCase_``) — confirm the bindings upstream.
    """
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs )
    if num_shards == 1:
        # Nothing to shard: a single job gets a shallow copy.
        return [dict(gen_kwargs )]
    shard_indices_per_group = _distribute_shards(num_shards=num_shards , max_num_jobs=max_num_jobs )
    return [
        {
            key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
            if isinstance(value , list )
            else value
            for key, value in gen_kwargs.items()
        }
        for group_idx in range(len(shard_indices_per_group ) )
    ]
def lowerCamelCase_ ( gen_kwargs_list : List[dict] ) -> dict:
    """Merge per-job kwargs back into one dict (inverse of the split helper).

    List values are concatenated across the jobs in order; non-list values are
    taken from the first job's kwargs (assumed identical everywhere).

    Fix: the original's parameter was obfuscated while the body referenced
    ``gen_kwargs_list``, and the ``isinstance`` second argument was garbled to
    the parameter itself instead of ``list``.
    """
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key] , list )
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def lowerCamelCase_ ( rng : np.random.Generator , gen_kwargs : dict ) -> dict:
    """Return a copy of ``gen_kwargs`` whose list values are shuffled in parallel.

    Lists of equal length receive the same permutation, so aligned data
    sources stay aligned; non-list values are passed through untouched. The
    input dict is not mutated.

    Fix: the original's two parameters shared one obfuscated name (a
    SyntaxError), the body's ``len``/``isinstance``/``range`` targets were
    garbled, and dataset-extraction residue was fused onto the return line.
    """
    # One shared permutation per distinct list length.
    list_sizes = {len(value ) for value in gen_kwargs.values() if isinstance(value , list )}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size ) )
        rng.shuffle(indices_per_size[size] )
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs )
    for key, value in shuffled_kwargs.items():
        if isinstance(value , list ):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value )]]
    return shuffled_kwargs
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
# Config smoke test: builds a config from the tester's inputs and checks a few
# expected attributes exist.
# NOTE(review): this fragment is obfuscated — the base ``__UpperCAmelCase``
# (presumably ConfigTester) is undefined here, the built config is assigned to
# a throwaway local, and ``__SCREAMING_SNAKE_CASE`` name-mangles to an
# undefined identifier (it was presumably the config instance). Left
# byte-identical; confirm against the upstream file.
class a_ ( __UpperCAmelCase ):
    def __a ( self :Union[str, Any]) -> Optional[Any]:
        UpperCAmelCase_ = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''hidden_sizes'''))
        self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''neck_hidden_sizes'''))
        self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''num_attention_heads'''))
# Model tester for MobileViT: builds configs/inputs and runs shape checks for
# the base model, image classification and semantic segmentation heads.
# NOTE(review): heavily garbled obfuscation — every method parameter is named
# ``_lowercase`` (duplicate parameter names are a SyntaxError), constructor
# values are assigned to a throwaway ``UpperCAmelCase_`` local instead of the
# ``self.*`` attributes the other methods read, and ``__SCREAMING_SNAKE_CASE``
# name-mangles to an undefined identifier. Left byte-identical; reconstruct
# from the upstream transformers test file before use.
class a_ :
    def __init__( self :List[str] , _lowercase :Optional[int] , _lowercase :List[str]=13 , _lowercase :Union[str, Any]=32 , _lowercase :int=2 , _lowercase :Optional[Any]=3 , _lowercase :List[Any]=640 , _lowercase :str=4 , _lowercase :Optional[Any]="silu" , _lowercase :int=3 , _lowercase :Tuple=32 , _lowercase :Union[str, Any]=0.1 , _lowercase :str=0.1 , _lowercase :Union[str, Any]=0.1 , _lowercase :Any=0.02 , _lowercase :Dict=True , _lowercase :Dict=True , _lowercase :Dict=10 , _lowercase :Union[str, Any]=None , ) -> Union[str, Any]:
        # Intended: self.parent = parent, self.batch_size = batch_size, ...
        UpperCAmelCase_ = parent
        UpperCAmelCase_ = batch_size
        UpperCAmelCase_ = image_size
        UpperCAmelCase_ = patch_size
        UpperCAmelCase_ = num_channels
        UpperCAmelCase_ = last_hidden_size
        UpperCAmelCase_ = num_attention_heads
        UpperCAmelCase_ = hidden_act
        UpperCAmelCase_ = conv_kernel_size
        UpperCAmelCase_ = output_stride
        UpperCAmelCase_ = hidden_dropout_prob
        UpperCAmelCase_ = attention_probs_dropout_prob
        UpperCAmelCase_ = classifier_dropout_prob
        UpperCAmelCase_ = use_labels
        UpperCAmelCase_ = is_training
        UpperCAmelCase_ = num_labels
        UpperCAmelCase_ = initializer_range
        UpperCAmelCase_ = scope
    # intended name: prepare_config_and_inputs — random pixel values plus
    # optional classification / segmentation labels.
    def __a ( self :Dict) -> Union[str, Any]:
        UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        UpperCAmelCase_ = None
        UpperCAmelCase_ = None
        if self.use_labels:
            UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels)
            UpperCAmelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
        UpperCAmelCase_ = self.get_config()
        return config, pixel_values, labels, pixel_labels
    # intended name: get_config
    def __a ( self :List[Any]) -> str:
        return MobileViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    # intended name: create_and_check_model — verifies last_hidden_state shape.
    def __a ( self :str , _lowercase :Optional[Any] , _lowercase :int , _lowercase :int , _lowercase :Dict) -> Optional[Any]:
        UpperCAmelCase_ = MobileViTModel(config=__SCREAMING_SNAKE_CASE)
        model.to(__SCREAMING_SNAKE_CASE)
        model.eval()
        UpperCAmelCase_ = model(__SCREAMING_SNAKE_CASE)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    # intended name: create_and_check_for_image_classification
    def __a ( self :Optional[Any] , _lowercase :Union[str, Any] , _lowercase :Tuple , _lowercase :int , _lowercase :Optional[int]) -> Optional[Any]:
        UpperCAmelCase_ = self.num_labels
        UpperCAmelCase_ = MobileViTForImageClassification(__SCREAMING_SNAKE_CASE)
        model.to(__SCREAMING_SNAKE_CASE)
        model.eval()
        UpperCAmelCase_ = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    # intended name: create_and_check_for_semantic_segmentation — checks logit
    # shape both with and without labels.
    def __a ( self :Optional[int] , _lowercase :str , _lowercase :int , _lowercase :Optional[Any] , _lowercase :Any) -> Optional[int]:
        UpperCAmelCase_ = self.num_labels
        UpperCAmelCase_ = MobileViTForSemanticSegmentation(__SCREAMING_SNAKE_CASE)
        model.to(__SCREAMING_SNAKE_CASE)
        model.eval()
        UpperCAmelCase_ = model(__SCREAMING_SNAKE_CASE)
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        UpperCAmelCase_ = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE)
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    # intended name: prepare_config_and_inputs_for_common
    def __a ( self :Optional[int]) -> str:
        UpperCAmelCase_ = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
        UpperCAmelCase_ = {'''pixel_values''': pixel_values}
        return config, inputs_dict
# Common-test harness for MobileViT (ModelTesterMixin + PipelineTesterMixin
# style). Declares the model classes / pipeline mapping under test and skips
# tests that don't apply (no inputs_embeds, no attentions, no text modality).
# NOTE(review): obfuscated — base classes ``__UpperCAmelCase`` are undefined,
# all test methods share the mangled name ``__a`` (later defs shadow earlier
# ones), results are assigned to a throwaway ``UpperCAmelCase_`` local, and
# ``__SCREAMING_SNAKE_CASE`` name-mangles to an undefined identifier. Left
# byte-identical; reconstruct from the upstream transformers test file.
@require_torch
class a_ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    # intended: all_model_classes
    UpperCamelCase__ : str =(
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    # intended: pipeline_model_mapping
    UpperCamelCase__ : List[Any] =(
        {
            '''feature-extraction''': MobileViTModel,
            '''image-classification''': MobileViTForImageClassification,
            '''image-segmentation''': MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # intended: test_pruning / test_resize_embeddings / test_head_masking /
    # has_attentions flags
    UpperCamelCase__ : Tuple =False
    UpperCamelCase__ : Any =False
    UpperCamelCase__ : Tuple =False
    UpperCamelCase__ : Optional[Any] =False
    # intended name: setUp
    def __a ( self :Any) -> Union[str, Any]:
        UpperCAmelCase_ = MobileViTModelTester(self)
        UpperCAmelCase_ = MobileViTConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE)
    def __a ( self :Any) -> List[str]:
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''MobileViT does not use inputs_embeds''')
    def __a ( self :Tuple) -> Optional[int]:
        pass
    @unittest.skip(reason='''MobileViT does not support input and output embeddings''')
    def __a ( self :str) -> Tuple:
        pass
    @unittest.skip(reason='''MobileViT does not output attentions''')
    def __a ( self :int) -> int:
        pass
    # intended name: test_forward_signature — first forward arg must be
    # ``pixel_values``.
    def __a ( self :Tuple) -> Tuple:
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_ = model_class(__SCREAMING_SNAKE_CASE)
            UpperCAmelCase_ = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase_ = [*signature.parameters.keys()]
            UpperCAmelCase_ = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE)
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def __a ( self :int) -> List[str]:
        pass
    def __a ( self :Optional[Any]) -> List[Any]:
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE)
    # intended name: test_hidden_states_output — 5 feature maps, each halving
    # the spatial resolution.
    def __a ( self :Tuple) -> List[Any]:
        def check_hidden_states_output(_lowercase :List[str] , _lowercase :str , _lowercase :Optional[int]):
            UpperCAmelCase_ = model_class(__SCREAMING_SNAKE_CASE)
            model.to(__SCREAMING_SNAKE_CASE)
            model.eval()
            with torch.no_grad():
                UpperCAmelCase_ = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
            UpperCAmelCase_ = outputs.hidden_states
            UpperCAmelCase_ = 5
            self.assertEqual(len(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            UpperCAmelCase_ = 2
            for i in range(len(__SCREAMING_SNAKE_CASE)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2)
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_ = True
            check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            UpperCAmelCase_ = True
            check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
    def __a ( self :List[Any]) -> Tuple:
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE)
    def __a ( self :Tuple) -> Optional[Any]:
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE)
    # intended name: test_model_from_pretrained (network-bound, hence @slow)
    @slow
    def __a ( self :Union[str, Any]) -> Optional[int]:
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_ = MobileViTModel.from_pretrained(__SCREAMING_SNAKE_CASE)
            self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
def A ( ):
    """Load the standard COCO cats test-fixture image used by the integration tests.

    Fix: the original opened the image into a throwaway local and then
    returned the undefined name ``image`` (a NameError). The misleading
    ``-> int`` annotation was dropped; the function returns a PIL image.
    """
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
# Slow integration tests for MobileViT checkpoints: classification logits,
# segmentation logits, and post-processed segmentation maps against golden
# values.
# NOTE(review): obfuscated — all test methods share the mangled name ``__a``
# (later defs shadow earlier ones), results are assigned to a throwaway
# ``UpperCAmelCase_`` local, and ``__SCREAMING_SNAKE_CASE`` name-mangles to an
# undefined identifier. Left byte-identical; reconstruct from upstream.
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
    @cached_property
    def __a ( self :int) -> Dict:
        # intended name: default_image_processor (read by the tests below)
        return MobileViTImageProcessor.from_pretrained('''apple/mobilevit-xx-small''') if is_vision_available() else None
    # intended name: test_inference_image_classification_head
    @slow
    def __a ( self :Dict) -> int:
        UpperCAmelCase_ = MobileViTForImageClassification.from_pretrained('''apple/mobilevit-xx-small''').to(__SCREAMING_SNAKE_CASE)
        UpperCAmelCase_ = self.default_image_processor
        UpperCAmelCase_ = prepare_img()
        UpperCAmelCase_ = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''').to(__SCREAMING_SNAKE_CASE)
        # forward pass
        with torch.no_grad():
            UpperCAmelCase_ = model(**__SCREAMING_SNAKE_CASE)
        # verify the logits
        UpperCAmelCase_ = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE)
        UpperCAmelCase_ = torch.tensor([-1.9_364, -1.2_327, -0.4_653]).to(__SCREAMING_SNAKE_CASE)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4))
    # intended name: test_inference_semantic_segmentation — golden logit slice.
    @slow
    def __a ( self :Union[str, Any]) -> Optional[int]:
        UpperCAmelCase_ = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''')
        UpperCAmelCase_ = model.to(__SCREAMING_SNAKE_CASE)
        UpperCAmelCase_ = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''')
        UpperCAmelCase_ = prepare_img()
        UpperCAmelCase_ = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''').to(__SCREAMING_SNAKE_CASE)
        # forward pass
        with torch.no_grad():
            UpperCAmelCase_ = model(**__SCREAMING_SNAKE_CASE)
        UpperCAmelCase_ = outputs.logits
        # verify the logits
        UpperCAmelCase_ = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE)
        UpperCAmelCase_ = torch.tensor(
            [
                [[6.9_713, 6.9_786, 7.2_422], [7.2_893, 7.2_825, 7.4_446], [7.6_580, 7.8_797, 7.9_420]],
                [[-10.6_869, -10.3_250, -10.3_471], [-10.4_228, -9.9_868, -9.7_132], [-11.0_405, -11.0_221, -10.7_318]],
                [[-3.3_089, -2.8_539, -2.6_740], [-3.2_706, -2.5_621, -2.5_108], [-3.2_534, -2.6_615, -2.6_651]],
            ] , device=__SCREAMING_SNAKE_CASE , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4))
    # intended name: test_post_processing_semantic_segmentation — shape checks
    # with and without explicit target sizes.
    @slow
    def __a ( self :List[Any]) -> Tuple:
        UpperCAmelCase_ = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''')
        UpperCAmelCase_ = model.to(__SCREAMING_SNAKE_CASE)
        UpperCAmelCase_ = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''')
        UpperCAmelCase_ = prepare_img()
        UpperCAmelCase_ = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''').to(__SCREAMING_SNAKE_CASE)
        # forward pass
        with torch.no_grad():
            UpperCAmelCase_ = model(**__SCREAMING_SNAKE_CASE)
        UpperCAmelCase_ = outputs.logits.detach().cpu()
        UpperCAmelCase_ = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE , target_sizes=[(50, 60)])
        UpperCAmelCase_ = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE)
        UpperCAmelCase_ = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE)
        UpperCAmelCase_ = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE)
| 716 |
def A ( x: int , y: int ) -> int:
    """Return the greatest common divisor of ``x`` and ``y`` (Euclid's algorithm).

    Fix: the original's two parameters shared one obfuscated name (a
    SyntaxError) while the body read ``x``/``y``, and its recursive call
    targeted ``greatest_common_divisor`` — a name this file never defines.
    Rewritten iteratively so the function is self-contained.
    """
    while y:
        x, y = y, x % y
    return x
def A ( x: int , y: int ) -> int:
    """Return the least common multiple of positive ``x`` and ``y``.

    Fix: the original's two parameters shared one obfuscated name (a
    SyntaxError) and it called ``greatest_common_divisor``, a name this file
    never defines; ``math.gcd`` is used instead.
    """
    import math  # local import: this script fragment has no import section
    return (x * y) // math.gcd(x , y )
def A ( n: int = 20 ) -> int:
    """Smallest positive number evenly divisible by all of 1..n (Project Euler 5).

    Fix: the original's parameter was obfuscated while the loop read ``n``,
    and each step called ``lcm`` — a name this file never defines. The lcm is
    computed inline via ``math.gcd``.
    """
    import math  # local import: this script fragment has no import section
    g = 1
    for i in range(1 , n + 1 ):
        # g = lcm(g, i)
        g = g * i // math.gcd(g , i )
    return g
if __name__ == "__main__":
print(f"{solution() = }")
| 561 | 0 |
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
UpperCAmelCase_ : Tuple = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
UpperCAmelCase_ : Optional[int] = get_tests_dir('fixtures/vocab.json')
UpperCAmelCase_ : Optional[Any] = get_tests_dir('fixtures')
class lowercase__ ( unittest.TestCase ):
    """Tests for AutoProcessor resolution: local folders, hub repos, custom
    registrations, and trust_remote_code behavior.

    NOTE(review): obfuscated — every test method shares the name
    ``UpperCAmelCase_`` (later defs shadow earlier ones, so unittest would
    discover none of them as tests), results are assigned to a throwaway
    ``_SCREAMING_SNAKE_CASE`` local, and ``__snake_case`` name-mangles to an
    undefined identifier (it stood for the various fixture paths/objects).
    Code left byte-identical; reconstruct names from the upstream file.
    """
    # intended: vocab_tokens fixture for building a tiny tokenizer
    A_ : List[str] = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
    def UpperCAmelCase_ ( self ):
        _SCREAMING_SNAKE_CASE : Any = 0
    # intended: test_processor_from_model_shortcut
    def UpperCAmelCase_ ( self ):
        _SCREAMING_SNAKE_CASE : Any = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
        self.assertIsInstance(__snake_case , __snake_case )
    # intended: test_processor_from_local_directory_from_model_config —
    # round-trips a config + processor through a temp dir.
    def UpperCAmelCase_ ( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            _SCREAMING_SNAKE_CASE : Optional[int] = WavaVecaConfig()
            _SCREAMING_SNAKE_CASE : List[str] = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
            # save in new folder
            model_config.save_pretrained(__snake_case )
            processor.save_pretrained(__snake_case )
            _SCREAMING_SNAKE_CASE : Dict = AutoProcessor.from_pretrained(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )
    # intended: test_processor_from_local_directory_from_extractor_config
    def UpperCAmelCase_ ( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(__snake_case , os.path.join(__snake_case , __snake_case ) )
            copyfile(__snake_case , os.path.join(__snake_case , """vocab.json""" ) )
            _SCREAMING_SNAKE_CASE : Optional[Any] = AutoProcessor.from_pretrained(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )
    # intended: test_processor_from_tokenizer_processor_class — processor is
    # still found after dropping `processor_class` from the tokenizer config.
    def UpperCAmelCase_ ( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            _SCREAMING_SNAKE_CASE : Dict = WavaVecaFeatureExtractor()
            _SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
            _SCREAMING_SNAKE_CASE : Any = WavaVecaProcessor(__snake_case , __snake_case )
            # save in new folder
            processor.save_pretrained(__snake_case )
            # drop `processor_class` in tokenizer
            with open(os.path.join(__snake_case , __snake_case ) , """r""" ) as f:
                _SCREAMING_SNAKE_CASE : Dict = json.load(__snake_case )
                config_dict.pop("""processor_class""" )
            with open(os.path.join(__snake_case , __snake_case ) , """w""" ) as f:
                f.write(json.dumps(__snake_case ) )
            _SCREAMING_SNAKE_CASE : Optional[int] = AutoProcessor.from_pretrained(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )
    # intended: same check, dropping `processor_class` from the feature
    # extractor config instead.
    def UpperCAmelCase_ ( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            _SCREAMING_SNAKE_CASE : Optional[int] = WavaVecaFeatureExtractor()
            _SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
            _SCREAMING_SNAKE_CASE : Optional[Any] = WavaVecaProcessor(__snake_case , __snake_case )
            # save in new folder
            processor.save_pretrained(__snake_case )
            # drop `processor_class` in feature extractor
            with open(os.path.join(__snake_case , __snake_case ) , """r""" ) as f:
                _SCREAMING_SNAKE_CASE : Dict = json.load(__snake_case )
                config_dict.pop("""processor_class""" )
            with open(os.path.join(__snake_case , __snake_case ) , """w""" ) as f:
                f.write(json.dumps(__snake_case ) )
            _SCREAMING_SNAKE_CASE : str = AutoProcessor.from_pretrained(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )
    # intended: test_processor_from_model_config_processor_class — processor
    # class taken from the model config alone.
    def UpperCAmelCase_ ( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            _SCREAMING_SNAKE_CASE : Optional[int] = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
            model_config.save_pretrained(__snake_case )
            # copy relevant files
            copyfile(__snake_case , os.path.join(__snake_case , """vocab.json""" ) )
            # create emtpy sample processor
            with open(os.path.join(__snake_case , __snake_case ) , """w""" ) as f:
                f.write("""{}""" )
            _SCREAMING_SNAKE_CASE : int = AutoProcessor.from_pretrained(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )
    # intended: test_from_pretrained_dynamic_processor — trust_remote_code
    # gating for processors defined on the Hub.
    def UpperCAmelCase_ ( self ):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(__snake_case ):
            _SCREAMING_SNAKE_CASE : str = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(__snake_case ):
            _SCREAMING_SNAKE_CASE : Optional[int] = AutoProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
        _SCREAMING_SNAKE_CASE : Dict = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
        self.assertTrue(processor.special_attribute_present )
        self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
        _SCREAMING_SNAKE_CASE : Optional[int] = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present )
        self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
        _SCREAMING_SNAKE_CASE : Any = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            # Test we can also load the slow version
            _SCREAMING_SNAKE_CASE : List[Any] = AutoProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case , use_fast=__snake_case )
            _SCREAMING_SNAKE_CASE : Tuple = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present )
            self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
    # intended: test_new_processor_registration — register/unregister a custom
    # processor for a custom config, with cleanup in `finally`.
    def UpperCAmelCase_ ( self ):
        try:
            AutoConfig.register("""custom""" , __snake_case )
            AutoFeatureExtractor.register(__snake_case , __snake_case )
            AutoTokenizer.register(__snake_case , slow_tokenizer_class=__snake_case )
            AutoProcessor.register(__snake_case , __snake_case )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(__snake_case ):
                AutoProcessor.register(__snake_case , __snake_case )
            # Now that the config is registered, it can be used as any other config with the auto-API
            _SCREAMING_SNAKE_CASE : Dict = CustomFeatureExtractor.from_pretrained(__snake_case )
            with tempfile.TemporaryDirectory() as tmp_dir:
                _SCREAMING_SNAKE_CASE : Dict = os.path.join(__snake_case , """vocab.txt""" )
                with open(__snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
                    vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
                _SCREAMING_SNAKE_CASE : List[str] = CustomTokenizer(__snake_case )
                _SCREAMING_SNAKE_CASE : int = CustomProcessor(__snake_case , __snake_case )
            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(__snake_case )
                _SCREAMING_SNAKE_CASE : str = AutoProcessor.from_pretrained(__snake_case )
                self.assertIsInstance(__snake_case , __snake_case )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]
    # intended: test_from_pretrained_dynamic_processor_conflict — local
    # registrations take precedence unless trust_remote_code is enabled.
    def UpperCAmelCase_ ( self ):
        class lowercase__ ( _snake_case ):
            '''Local stand-in feature extractor (special_attribute_present = False).'''
            A_ : int = False
        class lowercase__ ( _snake_case ):
            '''Local stand-in tokenizer (special_attribute_present = False).'''
            A_ : Any = False
        class lowercase__ ( _snake_case ):
            '''Local stand-in processor wiring the two stand-ins above.'''
            A_ : Optional[Any] = """AutoFeatureExtractor"""
            A_ : List[Any] = """AutoTokenizer"""
            A_ : Optional[Any] = False
        try:
            AutoConfig.register("""custom""" , __snake_case )
            AutoFeatureExtractor.register(__snake_case , __snake_case )
            AutoTokenizer.register(__snake_case , slow_tokenizer_class=__snake_case )
            AutoProcessor.register(__snake_case , __snake_case )
            # If remote code is not set, the default is to use local classes.
            _SCREAMING_SNAKE_CASE : int = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
            self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
            self.assertFalse(processor.special_attribute_present )
            self.assertFalse(processor.feature_extractor.special_attribute_present )
            self.assertFalse(processor.tokenizer.special_attribute_present )
            # If remote code is disabled, we load the local ones.
            _SCREAMING_SNAKE_CASE : Tuple = AutoProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
            self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
            self.assertFalse(processor.special_attribute_present )
            self.assertFalse(processor.feature_extractor.special_attribute_present )
            self.assertFalse(processor.tokenizer.special_attribute_present )
            # If remote is enabled, we load from the Hub.
            _SCREAMING_SNAKE_CASE : Dict = AutoProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
            self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
            self.assertTrue(processor.special_attribute_present )
            self.assertTrue(processor.feature_extractor.special_attribute_present )
            self.assertTrue(processor.tokenizer.special_attribute_present )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]
    # intended: test_auto_processor_creates_tokenizer
    def UpperCAmelCase_ ( self ):
        _SCREAMING_SNAKE_CASE : Any = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
    # intended: test_auto_processor_creates_image_processor
    def UpperCAmelCase_ ( self ):
        _SCREAMING_SNAKE_CASE : Dict = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
        self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class lowercase__ ( unittest.TestCase ):
    """Staging-hub round-trip tests: push a processor, then load it back."""

    # Vocabulary for the custom tokenizer built in the dynamic-processor test.
    # NOTE(review): the test bodies read ``self.vocab_tokens``; the mangled
    # attribute name ``A_`` was never consulted.
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        # Persist the staging token so the tests can authenticate and the
        # tear-down can delete whatever repos they created.
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup; a repo may not exist if its test failed early.
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        # TODO(review): the source checkpoint below was mangled to an undefined
        # placeholder; it should name the local sample-processor fixtures.
        processor = WavaVecaProcessor.from_pretrained(__snake_case)
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )
            new_processor = WavaVecaProcessor.from_pretrained(f"{USER}/test-processor")
            # Every feature-extractor attribute must survive the round trip.
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_in_organization(self):
        # TODO(review): see test_push_to_hub -- mangled source checkpoint.
        processor = WavaVecaProcessor.from_pretrained(__snake_case)
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )
            new_processor = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()
        # TODO(review): mangled source -- should be the sample
        # feature-extraction fixtures directory.
        feature_extractor = CustomFeatureExtractor.from_pretrained(__snake_case)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)
            processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)

            # save_pretrained added the proper auto_map field to the
            # feature-extractor config...
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )
            # ...and to the tokenizer config.
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # The custom module code has been copied next to the saved processor.
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

            new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
            # Can't isinstance-check: the class comes from a dynamically loaded module.
            self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
| 533 |
'''simple docstring'''
import numpy as np
class Cell:
    """A single grid cell used by the A* search.

    NOTE(review): the in-file callers (``astar`` and the ``__main__`` block)
    refer to this class as ``Cell``; the mangled name collided with the
    grid-world class below, so the canonical name is restored.
    """

    def __init__(self):
        # The original assigned these to throwaway locals, leaving instances
        # without any attributes; they must live on ``self``.
        self.position = (0, 0)   # (x, y) of this cell on the grid
        self.parent = None       # back-pointer used to reconstruct the path
        self.g = 0               # path cost from the start cell
        self.h = 0               # heuristic estimate to the goal
        self.f = 0               # g + h

    def __eq__(self, cell):
        # Two cells are the same iff they sit on the same grid position.
        return self.position == cell.position

    def showcell(self):
        print(self.position)
class Gridworld:
    """A rectangular grid world; cells are addressed as (x, y).

    NOTE(review): renamed from a mangled placeholder -- the ``__main__``
    block instantiates this class as ``Gridworld``.
    """

    def __init__(self, world_size=(5, 5)):
        # The original stored these in throwaway locals; the methods below
        # (and ``astar``'s neighbour expansion) read them from ``self``.
        self.w = np.zeros(world_size)          # 2-D array visualising the world
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the in-bounds neighbours (8-connectivity) of *cell*."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    """A* search from *start* to *goal* on *world*.

    Returns the list of (x, y) positions from start to goal. The heuristic is
    the squared Euclidean distance, and each step costs 1.

    NOTE(review): the mangled original had duplicate parameter names (a
    SyntaxError), computed the heuristic from a variable minus itself, and its
    closed/open membership checks were no-ops (``continue`` inside the inner
    loop); all three are fixed here.
    """
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        # Expand the open cell with the smallest f = g + h.
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            # Skip cells that were already expanded.
            if any(c == n for c in _closed):
                continue
            n.g = current.g + 1
            xa, ya = n.position
            xb, yb = goal.position
            n.h = (yb - ya) ** 2 + (xb - xa) ** 2
            n.f = n.g + n.h
            # Skip if an equally good (or better) copy is already queued.
            if any(c == n and c.f <= n.f for c in _open):
                continue
            _open.append(n)

    # Walk the parent chain back from the goal to reconstruct the path.
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
    # Demo: search a 5x5 world from corner to corner and visualise the path.
    # NOTE(review): the mangled original assigned every object to throwaway
    # names and then called ``Gridworld``/``Cell``/``astar`` -- restored here.
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 533 | 1 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    """Builds configs and dummy inputs for the TimmBackbone test suite.

    NOTE(review): renamed from a mangled placeholder -- the test case below
    instantiates it as ``TimmBackboneModelTester`` and calls the canonical
    tester method names.
    """

    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        # The original assigned every value to a throwaway local, so instances
        # had no attributes; store them on ``self`` instead.
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values) for a forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        """Run a forward pass and check the final feature-map shape."""
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class __lowerCamelCase ( _a , _a , _a , unittest.TestCase ):
a : Optional[int] =(TimmBackbone,) if is_torch_available() else ()
a : Any ={"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
a : Optional[Any] =False
a : Any =False
a : Any =False
a : str =False
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = TimmBackboneModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
    def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
        """Run the common config round-trip checks via the shared ConfigTester."""
        # NOTE(review): ``self.config_tester`` must be assigned in setUp; the
        # mangled setUp above stores it in a local instead -- confirm the fixture.
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
        """Compare a timm-backed backbone against its transformers counterpart.

        NOTE(review): this body is name-mangled -- the four assignments below
        all target one throwaway local, and ``snake_case_`` is undefined, so
        ``timm_model`` / ``transformers_model`` are never bound. Presumably the
        two checkpoint names above were meant to be passed to
        ``AutoBackbone.from_pretrained`` (with ``use_timm_backbone=True`` for
        the timm variant) and bound to those two names -- confirm against the
        upstream test before relying on it.
        """
        UpperCamelCase__ = 'resnet18'
        UpperCamelCase__ = 'microsoft/resnet-18'
        UpperCamelCase__ = AutoBackbone.from_pretrained(snake_case_ , use_timm_backbone=snake_case_ )
        UpperCamelCase__ = AutoBackbone.from_pretrained(snake_case_ )
        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices , (-1,) )
        self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        UpperCamelCase__ = AutoBackbone.from_pretrained(snake_case_ , use_timm_backbone=snake_case_ , out_indices=[1, 2, 3] )
        UpperCamelCase__ = AutoBackbone.from_pretrained(snake_case_ , out_indices=[1, 2, 3] )
        self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = True
UpperCamelCase__ = self.has_attentions
# no need to test all models as different heads yield the same functionality
UpperCamelCase__ = self.all_model_classes[0]
UpperCamelCase__ = model_class(snake_case_ )
model.to(snake_case_ )
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = model(**snake_case_ )
UpperCamelCase__ = outputs[0][-1]
# Encoder-/Decoder-only models
UpperCamelCase__ = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
UpperCamelCase__ = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=snake_case_ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(**snake_case_ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
UpperCamelCase__ = copy.deepcopy(snake_case_ )
UpperCamelCase__ = None
UpperCamelCase__ = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(**snake_case_ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
UpperCamelCase__ = copy.deepcopy(snake_case_ )
UpperCamelCase__ = False
UpperCamelCase__ = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(**snake_case_ )
| 20 |
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Dict:
    """Factory: build the download command from the parsed argparse namespace.

    Registered via ``set_defaults(func=...)`` so the CLI can call
    ``args.func(args)``.

    NOTE(review): the original read a module-level ``args`` that does not
    exist (the namespace is this function's parameter) and referenced an
    undefined ``DownloadCommand`` -- the command class defined below.
    """
    return __lowerCamelCase(
        SCREAMING_SNAKE_CASE.model,
        SCREAMING_SNAKE_CASE.cache_dir,
        SCREAMING_SNAKE_CASE.force,
        SCREAMING_SNAKE_CASE.trust_remote_code,
    )
class __lowerCamelCase ( _a ):
    """CLI command that pre-downloads a model and its tokenizer into the cache.

    NOTE(review): the original ``__init__`` reused one parameter name four
    times (a SyntaxError) and stored the values in throwaway locals; both
    methods also shared a single mangled name. Canonical names restored.
    """

    @staticmethod
    def register_subcommand(parser) -> None:
        """Register the ``download`` sub-command on *parser* (a subparsers action)."""
        download_parser = parser.add_parser('download')
        download_parser.add_argument(
            '--cache-dir', type=str, default=None, help='Path to location to store the models')
        download_parser.add_argument(
            '--force', action='store_true', help='Force the model to be download even if already in cache-dir')
        download_parser.add_argument(
            '--trust-remote-code', action='store_true', help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine', )
        download_parser.add_argument('model', type=str, help='Name of the model to download')
        # Dispatch through the factory above so ``args.func(args)`` builds us.
        download_parser.set_defaults(func=lowerCAmelCase_)

    def __init__(self, model, cache, force, trust_remote_code) -> None:
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self) -> None:
        """Fetch the model and tokenizer, populating the local cache."""
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
| 20 | 1 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
_A : Optional[Any] = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
_A : Union[str, Any] = typing.Union[np.floataa, int, float] # noqa: UP007
def euclidean_distance(vector_a: Vector, vector_b: Vector) -> VectorOut:
    """Return the Euclidean distance between two vectors (NumPy implementation).

    NOTE(review): the original declared the same parameter name twice (a
    SyntaxError) and read undefined names in the body; the benchmark below
    calls this function as ``euclidean_distance``.
    """
    return np.sqrt(np.sum((np.asarray(vector_a) - np.asarray(vector_b)) ** 2))
def euclidean_distance_no_np(vector_a: Vector, vector_b: Vector) -> VectorOut:
    """Return the Euclidean distance between two vectors (pure-Python).

    NOTE(review): the original declared the same parameter name twice (a
    SyntaxError) and its tuple unpacking clobbered one component, making the
    difference always zero; the benchmark below calls this function as
    ``euclidean_distance_no_np``.
    """
    return sum((a - b) ** 2 for a, b in zip(vector_a, vector_b)) ** (1 / 2)
if __name__ == "__main__":

    def benchmark() -> None:
        """Time both distance implementations on a small fixed input.

        NOTE(review): the inner function was mangled to a throwaway name while
        the call site below says ``benchmark()`` -- the name is restored.
        """
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )

    benchmark()
| 100 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of squares of 1..n.

    Uses the closed-form identities sum(i^2) = n(n+1)(2n+1)/6 and
    (sum i)^2 = (n(n+1)/2)^2.

    NOTE(review): the original stored both intermediates in one throwaway
    local and then read two undefined names; the ``__main__`` block calls
    this function as ``solution``.
    """
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 620 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import _LazyModule

# Map of submodule name -> public names it exports; consumed by _LazyModule.
# NOTE(review): the original bound this dict to a throwaway name and then
# passed an undefined ``_import_structure`` to _LazyModule; it also assigned
# the lazy module to a dead variable instead of installing it in sys.modules.
_import_structure = {"""tokenization_bertweet""": ["""BertweetTokenizer"""]}

if TYPE_CHECKING:
    from .tokenization_bertweet import BertweetTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 168 |
"""simple docstring"""
def A__ ( UpperCamelCase__ ):
    """Return the surface area of a regular dodecahedron with the given edge length.

    Raises ValueError for non-numeric or non-positive input.

    NOTE(review): the original called ``isinstance(edge, edge)`` (always a
    TypeError) and compared before type-checking, so a string argument raised
    TypeError instead of the documented ValueError.
    """
    if not isinstance(UpperCamelCase__, (int, float)) or UpperCamelCase__ <= 0:
        raise ValueError('''Length must be a positive.''')
    # A = 3 * sqrt(25 + 10*sqrt(5)) * edge^2
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (UpperCamelCase__**2)
def A__ ( UpperCamelCase__ ):
    """Return the volume of a regular dodecahedron with the given edge length.

    Raises ValueError for non-numeric or non-positive input.

    NOTE(review): same validation bug as the surface-area function above --
    ``isinstance(edge, edge)`` and the comparison-before-type-check are fixed.
    This definition shadows the previous ``A__``; presumably both were meant
    to have distinct names (surface area / volume) -- confirm intent.
    """
    if not isinstance(UpperCamelCase__, (int, float)) or UpperCamelCase__ <= 0:
        raise ValueError('''Length must be a positive.''')
    # V = (15 + 7*sqrt(5)) / 4 * edge^3
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (UpperCamelCase__**3)
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    # NOTE(review): neither function above currently carries a doctest, so
    # this is a no-op until examples are added to the docstrings.
    import doctest
    doctest.testmod()
| 168 | 1 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq-only bookkeeping keys from *state_dict* in place.

    Missing keys are ignored (``pop`` with a default), so this is safe on
    checkpoints that lack some of them.

    NOTE(review): renamed from a mangled placeholder; the original body read
    an undefined ``state_dict`` name and popped the wrong arguments.
    """
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''_float_tensor''',
        '''decoder.output_projection.weight''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free ``nn.Linear`` whose weight tensor is *emb*'s weight.

    Used to create an LM head tied to the shared embedding.

    NOTE(review): renamed from a mangled placeholder; the original unpacked
    the shape into a single reused name and passed the embedding module itself
    as the ``bias`` argument.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Share the underlying weight tensor rather than copying it.
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    """Convert a fairseq mBART checkpoint on disk to a HF MBart model.

    NOTE(review): the original signature reused one parameter name four times
    (a SyntaxError), loaded the state dict from the wrong object, and dropped
    the ``shared.weight`` assignment; the ``__main__`` block below calls this
    function by this name.
    """
    state_dict = torch.load(checkpoint_path, map_location='''cpu''')['''model''']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        # mBART-50 fine-tuned checkpoints use ReLU activations.
        mbart_config.activation_function = '''relu'''
    # fairseq ties the shared embedding to the decoder embedding.
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        # Fine-tuned checkpoints need an LM head tied to the shared embedding.
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
    )
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument(
        '''--hf_config''',
        default='''facebook/mbart-large-cc25''',
        type=str,
        help='''Which huggingface architecture to use: mbart-large''',
    )
    parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''')
    parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
    args = parser.parse_args()
    # NOTE(review): the original read ``args.mbart_aa``, which argparse never
    # creates -- the ``--mbart_50`` flag's dest is ``mbart_50``.
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
| 41 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class lowercase_ (YolosImageProcessor ):
    """Deprecated alias of ``YolosImageProcessor``; warns on instantiation.

    NOTE(review): the original inherited from an undefined placeholder (the
    imported ``YolosImageProcessor`` is the intended base), reused one name
    for ``*args`` and ``**kwargs`` (a SyntaxError), and passed the kwargs dict
    where the warning category belongs.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use YolosImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 41 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    """Holds hyper-parameters for LevitImageProcessor tests and builds the
    kwargs dict the processor under test is instantiated from.

    NOTE(review): renamed from a mangled placeholder -- the test case below
    instantiates it as ``LevitImageProcessingTester``. The original
    ``__init__`` also stored every value in a throwaway local, so instances
    had none of the attributes the test bodies read.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        # mean/std defaults are built per instance to avoid shared mutable
        # defaults; the effective values are unchanged.
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"""shortest_edge""": 18}
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self):
        """Return the kwargs used to build the LevitImageProcessor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Behavioural tests for LevitImageProcessor with PIL, NumPy and PyTorch
    inputs.

    NOTE(review): the original inherited from an undefined placeholder (the
    imported ``ImageProcessingSavingTestMixin`` is the intended mixin), every
    method shared one mangled name (so unittest would only see the last), and
    locals were assigned under one name and read under another. Canonical
    test names and variable flow restored.
    """

    # Read by the saving-test mixin.
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, """image_mean"""))
        self.assertTrue(hasattr(image_processing, """image_std"""))
        self.assertTrue(hasattr(image_processing, """do_normalize"""))
        self.assertTrue(hasattr(image_processing, """do_resize"""))
        self.assertTrue(hasattr(image_processing, """do_center_crop"""))
        self.assertTrue(hasattr(image_processing, """size"""))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"""shortest_edge""": 18})
        self.assertEqual(image_processor.crop_size, {"""height""": 18, """width""": 18})

        # Keyword overrides take precedence over the dict values.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"""shortest_edge""": 42})
        self.assertEqual(image_processor.crop_size, {"""height""": 84, """width""": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )
| 720 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration class for a WavLM-style speech model (``model_type="wavlm"``).

    The original ``__init__`` declared every parameter as ``__A`` (duplicate
    argument names — a SyntaxError) and assigned attribute values to throwaway
    locals instead of ``self``; the base class ``A__`` was undefined
    (``PretrainedConfig`` is the config base imported at the top of this file).
    Parameter names are restored from the defaults' order.
    """

    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # The original assigned classifier_proj_size twice; one assignment kept.
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        """Product of the conv strides: input samples consumed per output frame."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
# --- (file-concatenation artifact "| 256 | 0 |" removed)
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    """Holds the hyper-parameters for image-processor tests and builds the
    kwargs dict used to instantiate the processor under test.

    The original declared every ``__init__`` parameter as ``A`` (duplicate
    argument names — a SyntaxError) and was named ``_UpperCAmelCase`` while
    being instantiated below as ``EfficientFormerImageProcessorTester``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        # Mutable list defaults kept for interface compatibility; they are only read here.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs dict for ``image_processing_class(**...)``."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Image-processor tests (PIL / numpy / torch inputs) for EfficientFormer.

    Restored from heavy name mangling: the mixin base ``_A`` was undefined
    (``ImageProcessingSavingTestMixin`` is imported above), method-local
    ``UpperCamelCase_`` references were undefined, and all methods were named
    ``A``. Method names below follow the standard transformers test suite —
    confirm against the canonical test file if exact names matter.
    """

    # EfficientFormer checkpoints reuse ViT's image processor.
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
# --- (file-concatenation artifact "| 231 |" removed)
'''simple docstring'''
import os
def solution(filename: str = "input.txt") -> int:
    """Project Euler 82: minimal path sum through a matrix, moving only
    right, up, or down, from any cell in the left column to any cell in the
    right column.

    The function was defined as ``__lowerCamelCase`` but invoked below as
    ``solution()`` — restored to match the call site.

    :param filename: comma-separated matrix file, one row per line.
    :return: the minimal path sum.
    """
    # NOTE(review): the path resolves relative to the dirname of `filename`
    # itself (not this script's directory), as in the original; absolute
    # paths work unchanged — confirm the intended base directory.
    with open(os.path.join(os.path.dirname(filename), filename)) as input_file:
        matrix = [[int(element) for element in line.split(",")] for line in input_file.readlines()]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    # Left column: the path starts here with no movement cost yet.
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        # First pass: the only way into column j is from the left.
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # Downward pass: allow entering each cell from the cell above.
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )
        # Upward pass: allow entering each cell from the cell below.
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
# --- (file-concatenation artifact "| 368 | 0 |" removed)
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()

# Registry of convertible architectures. The dict was bound to the mangled name
# `__UpperCamelCase`, but both conversion functions below read `MODEL_CLASSES` —
# restored to the name the call sites use.
# NOTE(review): entries hold 4, 5 or more elements (DPR has 10); the unpacking
# arity in the functions below only matches some of them — confirm upstream.
MODEL_CLASSES = {
    "bart": (
        BartConfig,
        TFBartForConditionalGeneration,
        TFBartForSequenceClassification,
        BartForConditionalGeneration,
        BART_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "bert": (
        BertConfig,
        TFBertForPreTraining,
        BertForPreTraining,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-base-cased-finetuned-mrpc": (
        BertConfig,
        TFBertForSequenceClassification,
        BertForSequenceClassification,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "dpr": (
        DPRConfig,
        TFDPRQuestionEncoder,
        TFDPRContextEncoder,
        TFDPRReader,
        DPRQuestionEncoder,
        DPRContextEncoder,
        DPRReader,
        DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "gpt2": (
        GPTaConfig,
        TFGPTaLMHeadModel,
        GPTaLMHeadModel,
        GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlnet": (
        XLNetConfig,
        TFXLNetLMHeadModel,
        XLNetLMHeadModel,
        XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm": (
        XLMConfig,
        TFXLMWithLMHeadModel,
        XLMWithLMHeadModel,
        XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm-roberta": (
        XLMRobertaConfig,
        TFXLMRobertaForMaskedLM,
        XLMRobertaForMaskedLM,
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "transfo-xl": (
        TransfoXLConfig,
        TFTransfoXLLMHeadModel,
        TransfoXLLMHeadModel,
        TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "openai-gpt": (
        OpenAIGPTConfig,
        TFOpenAIGPTLMHeadModel,
        OpenAIGPTLMHeadModel,
        OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "roberta": (
        RobertaConfig,
        TFRobertaForCausalLM,
        TFRobertaForMaskedLM,
        RobertaForMaskedLM,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "layoutlm": (
        LayoutLMConfig,
        TFLayoutLMForMaskedLM,
        LayoutLMForMaskedLM,
        LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "roberta-large-mnli": (
        RobertaConfig,
        TFRobertaForSequenceClassification,
        RobertaForSequenceClassification,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "camembert": (
        CamembertConfig,
        TFCamembertForMaskedLM,
        CamembertForMaskedLM,
        CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "flaubert": (
        FlaubertConfig,
        TFFlaubertWithLMHeadModel,
        FlaubertWithLMHeadModel,
        FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert": (
        DistilBertConfig,
        TFDistilBertForMaskedLM,
        DistilBertForMaskedLM,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert-base-distilled-squad": (
        DistilBertConfig,
        TFDistilBertForQuestionAnswering,
        DistilBertForQuestionAnswering,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert": (
        LxmertConfig,
        TFLxmertForPreTraining,
        LxmertForPreTraining,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert-visual-feature-encoder": (
        LxmertConfig,
        TFLxmertVisualFeatureEncoder,
        LxmertVisualFeatureEncoder,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "ctrl": (
        CTRLConfig,
        TFCTRLLMHeadModel,
        CTRLLMHeadModel,
        CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "albert": (
        AlbertConfig,
        TFAlbertForPreTraining,
        AlbertForPreTraining,
        ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "t5": (
        TaConfig,
        TFTaForConditionalGeneration,
        TaForConditionalGeneration,
        T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "electra": (
        ElectraConfig,
        TFElectraForPreTraining,
        ElectraForPreTraining,
        ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "wav2vec2": (
        WavaVecaConfig,
        TFWavaVecaModel,
        WavaVecaModel,
        WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    """Convert one PyTorch checkpoint to a TF 2.0 ``.h5`` weights file.

    Restored from mangling: the def was named ``__UpperCAmelCase`` while being
    called below as ``convert_pt_checkpoint_to_tf``, and every parameter was
    declared as ``_snake_case`` (duplicate argument names — a SyntaxError).

    :param model_type: key into ``MODEL_CLASSES``.
    :param pytorch_checkpoint_path: local path or AWS shortcut name.
    :param config_file: config JSON path or AWS shortcut name.
    :param tf_dump_path: where to save the TF weights.
    :param compare_with_pt_model: also run both models and assert outputs agree.
    :param use_cached_models: reuse cached downloads instead of re-fetching.
    """
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    # NOTE(review): some MODEL_CLASSES entries hold more than 4 elements and
    # would fail this unpacking — arity kept as in the original.
    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        # Weights come from the explicit state_dict, so no name/path is needed.
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    """Convert all (or one type of) PyTorch checkpoints to TF 2.0 weights.

    Restored from mangling: the def was named ``__UpperCAmelCase`` while being
    called below as ``convert_all_pt_checkpoints_to_tf``, with every parameter
    declared as ``_snake_case`` (duplicate argument names — a SyntaxError).
    """
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        # NOTE(review): 5-way unpacking kept from the original — several
        # MODEL_CLASSES entries have a different arity; confirm upstream.
        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f" Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f" Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f" Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            # Local files get a generic dump name instead of their path.
            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    # The original bound the parser and the parsed args to `__UpperCamelCase`
    # while the code below read `parser` / `args` (NameError) — restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        help=(
            f"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
            "convert all the models from AWS."
        ),
    )
    parser.add_argument(
        "--pytorch_checkpoint_path",
        default=None,
        type=str,
        help=(
            "Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
            "If not given, will download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        help=(
            "The config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture. If not given and "
            "--pytorch_checkpoint_path is not given or is a shortcut name "
            "use the configuration associated to the shortcut name on the AWS"
        ),
    )
    parser.add_argument(
        "--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
    )
    parser.add_argument(
        "--use_cached_models",
        action="store_true",
        help="Use cached models if possible instead of updating to latest checkpoint versions.",
    )
    parser.add_argument(
        "--remove_cached_files",
        action="store_true",
        help="Remove pytorch models after conversion (save memory when converting in batches).",
    )
    parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
    args = parser.parse_args()

    convert_all_pt_checkpoints_to_tf(
        args.model_type.lower() if args.model_type is not None else None,
        args.tf_dump_path,
        model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
        if args.pytorch_checkpoint_path is not None
        else None,
        config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
        compare_with_pt_model=args.compare_with_pt_model,
        use_cached_models=args.use_cached_models,
        remove_cached_files=args.remove_cached_files,
        only_convert_finetuned_models=args.only_convert_finetuned_models,
    )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Both module constants were bound to the same name (`__UpperCamelCase`), so the
# logger was silently clobbered by the archive map; restore distinct names.
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class UpperCAmelCase_(PretrainedConfig):
    """Configuration for TrOCR decoder models (``model_type="trocr"``).

    Restored from mangling: the base ``lowercase__`` was undefined
    (``PretrainedConfig`` is imported above), the three class attributes were
    all rebound to ``snake_case_`` (clobbering each other), and every
    ``__init__`` parameter was declared as ``_lowercase`` (duplicate argument
    names — a SyntaxError). Parameter names are restored from the defaults'
    order; a trailing ``| 227 | 0 |`` concatenation artifact was removed.
    """

    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
import argparse
import os
import re
TRANSFORMERS_PATH = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
#   SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite=False):
    """Sort the entries of each OrderedDict mapping in `fname` alphabetically.

    The original bound the path constant and both regexes to a single name
    (`lowerCAmelCase_`, each rebinding clobbering the last) while the code read
    `_re_intro_mapping` / `_re_identifier`, and named this function
    `snake_case` while the caller used `sort_auto_mapping` — all restored.

    :param fname: Python source file to process.
    :param overwrite: write the sorted content back when True.
    :return: True when the file would change and `overwrite` is False,
        otherwise None.
    """
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            # Mapping entries sit 8 spaces to the right of the intro line.
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite=False):
    """Run `sort_auto_mapping` on every module in TRANSFORMERS_PATH; in check
    mode (overwrite=False), raise if any file is out of order."""
    fnames = [os.path.join(TRANSFORMERS_PATH, f) for f in os.listdir(TRANSFORMERS_PATH) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name):
    """Build a MobileNetV1 config whose depth multiplier and image size are
    parsed from `model_name` (e.g. ``mobilenet_v1_1.0_224``).

    Restored from mangling: the def was named `snake_case` while being called
    below as `get_mobilenet_va_config`, and `__magic_name__` stood in for
    several distinct values.
    """
    config = MobileNetVaConfig(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    # Shift every label up by one to make room for the "background" class.
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def prepare_img():
    """Download the standard COCO cats image used to verify conversions.

    The def was named `snake_case` while being called below as `prepare_img`.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy a TensorFlow MobileNetV1 checkpoint into a 🤗 model, sanity-check
    the logits, and save (optionally pushing to the hub).

    Restored from mangling: the def was named `snake_case` while the main
    guard calls `convert_movilevit_checkpoint`, and `__magic_name__` stood in
    for several distinct locals.
    """
    config = get_mobilenet_va_config(model_name)

    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # The original bound the parser and the parsed args to `lowerCAmelCase_`
    # while the code below read `parser` / `args` (NameError) — restored; the
    # fused "| 217 | 1 |" concatenation artifact was removed.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()

    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
lowerCamelCase_ : str = logging.get_logger(__name__)
lowerCamelCase_ : Optional[Any] = {"""vocab_file""": """spiece.model"""}
lowerCamelCase_ : List[str] = {
"""vocab_file""": {
"""TsinghuaAI/CPM-Generate""": """https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model""",
}
}
class _UpperCamelCase ( _A ):
    """CPM-style SentencePiece tokenizer (XLNet layout) that pre-segments Chinese
    text with jieba and maps " "/"\\n" to U+2582/U+2583 before encoding (undone in
    ``_decode``).

    NOTE(review): identifiers in this block look machine-mangled — every method is
    named ``lowerCAmelCase__`` (later defs shadow earlier ones), every parameter is
    ``snake_case_`` (duplicated within a single signature, which is a SyntaxError),
    and locals are assigned to ``UpperCamelCase_`` while later statements read the
    original names (``sp_model_kwargs``, ``inputs``, ``pieces``, …). The class
    cannot run as written; restore the original identifiers before use.
    """

    # Constructor: configure special tokens, load the SentencePiece model, set up jieba.
    def __init__( self : Optional[int] , snake_case_ : Any , snake_case_ : int=False , snake_case_ : int=True , snake_case_ : Tuple=False , snake_case_ : List[Any]="<s>" , snake_case_ : Optional[Any]="</s>" , snake_case_ : Union[str, Any]="<unk>" , snake_case_ : Union[str, Any]="<sep>" , snake_case_ : str="<pad>" , snake_case_ : List[Any]="<cls>" , snake_case_ : Optional[int]="<mask>" , snake_case_ : int=["<eop>", "<eod>"] , snake_case_ : Optional[Dict[str, Any]] = None , **snake_case_ : Optional[int] , ):
        # Wrap a plain-string mask token in AddedToken (lstrip/rstrip behaviour).
        UpperCamelCase_: Optional[int] = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
        UpperCamelCase_: Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=snake_case_ , remove_space=snake_case_ , keep_accents=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , additional_special_tokens=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , **snake_case_ , )
        UpperCamelCase_: Optional[Any] = 3
        UpperCamelCase_: int = do_lower_case
        UpperCamelCase_: Union[str, Any] = remove_space
        UpperCamelCase_: List[Any] = keep_accents
        UpperCamelCase_: Any = vocab_file
        UpperCamelCase_: int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(snake_case_ )
        # jieba is an optional third-party dependency needed for Chinese segmentation.
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                """You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
                """See https://pypi.org/project/jieba/ for installation.""" )
        UpperCamelCase_: List[Any] = jieba
        # Translation table: " " -> U+2582, "\n" -> U+2583 (reversed in _decode below).
        UpperCamelCase_: Dict = str.maketrans(""" \n""" , """\u2582\u2583""" )

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def lowerCAmelCase__ ( self : Dict ):
        # Vocabulary size = number of pieces in the SentencePiece model.
        return len(self.sp_model )

    # get_vocab: token -> id mapping, including tokens added after loading.
    def lowerCAmelCase__ ( self : int ):
        UpperCamelCase_: Any = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    # Pickling support: the SentencePiece processor itself is not picklable.
    def __getstate__( self : Dict ):
        UpperCamelCase_: Any = self.__dict__.copy()
        UpperCamelCase_: Optional[int] = None
        return state

    # Unpickling: restore state and re-load the SentencePiece model from disk.
    def __setstate__( self : List[str] , snake_case_ : List[Any] ):
        UpperCamelCase_: int = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            UpperCamelCase_: Tuple = {}
        UpperCamelCase_: Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    # Text normalization applied before SentencePiece: whitespace collapsing,
    # quote normalization, optional accent stripping (NFKD) and lowercasing.
    def lowerCAmelCase__ ( self : str , snake_case_ : Union[str, Any] ):
        if self.remove_space:
            UpperCamelCase_: List[Any] = """ """.join(inputs.strip().split() )
        else:
            UpperCamelCase_: List[Any] = inputs
        UpperCamelCase_: str = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
        if not self.keep_accents:
            UpperCamelCase_: List[Any] = unicodedata.normalize("""NFKD""" , snake_case_ )
            UpperCamelCase_: Any = """""".join([c for c in outputs if not unicodedata.combining(snake_case_ )] )
        if self.do_lower_case:
            UpperCamelCase_: int = outputs.lower()
        return outputs

    # _tokenize: SentencePiece encode with the XLNet "digit followed by comma"
    # re-splitting heuristic so numbers tokenize consistently.
    def lowerCAmelCase__ ( self : List[str] , snake_case_ : str ):
        UpperCamelCase_: str = self.preprocess_text(snake_case_ )
        UpperCamelCase_: Optional[int] = self.sp_model.encode(snake_case_ , out_type=snake_case_ )
        UpperCamelCase_: Dict = []
        for piece in pieces:
            # Re-split pieces like "9," into the number and the trailing comma.
            if len(snake_case_ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
                UpperCamelCase_: Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(snake_case_ , """""" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        UpperCamelCase_: List[Any] = cur_pieces[1:]
                    else:
                        UpperCamelCase_: List[str] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(snake_case_ )
            else:
                new_pieces.append(snake_case_ )
        return new_pieces

    # token -> id via the SentencePiece model.
    def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Optional[Any] ):
        return self.sp_model.PieceToId(snake_case_ )

    # id -> token via the SentencePiece model.
    def lowerCAmelCase__ ( self : str , snake_case_ : str ):
        return self.sp_model.IdToPiece(snake_case_ )

    # tokens -> string: join pieces and turn SPIECE_UNDERLINE back into spaces.
    def lowerCAmelCase__ ( self : int , snake_case_ : Optional[int] ):
        UpperCamelCase_: List[Any] = """""".join(snake_case_ ).replace(snake_case_ , """ """ ).strip()
        return out_string

    # build_inputs_with_special_tokens (XLNet layout): X <sep> <cls>  /  A <sep> B <sep> <cls>.
    def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
        UpperCamelCase_: List[Any] = [self.sep_token_id]
        UpperCamelCase_: List[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    # get_special_tokens_mask: 1 marks the special tokens appended by the method above.
    def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None , snake_case_ : bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
        if token_ids_a is not None:
            return ([0] * len(snake_case_ )) + [1] + ([0] * len(snake_case_ )) + [1, 1]
        return ([0] * len(snake_case_ )) + [1, 1]

    # create_token_type_ids_from_sequences: segment ids 0/1, with segment 2 for <cls>.
    def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
        UpperCamelCase_: str = [self.sep_token_id]
        UpperCamelCase_: Any = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id

    # save_vocabulary: copy (or serialize) the SentencePiece model file into save_directory.
    def lowerCAmelCase__ ( self : str , snake_case_ : str , snake_case_ : Optional[str] = None ):
        if not os.path.isdir(snake_case_ ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        UpperCamelCase_: Optional[Any] = os.path.join(
            snake_case_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , snake_case_ )
        elif not os.path.isfile(self.vocab_file ):
            # No model file on disk: write out the in-memory serialized proto instead.
            with open(snake_case_ , """wb""" ) as fi:
                UpperCamelCase_: Any = self.sp_model.serialized_model_proto()
                fi.write(snake_case_ )
        return (out_vocab_file,)

    # _decode: undo the " "/"\n" -> U+2582/U+2583 mapping applied at encode time.
    def lowerCAmelCase__ ( self : Union[str, Any] , *snake_case_ : List[Any] , **snake_case_ : List[str] ):
        UpperCamelCase_: List[str] = super()._decode(*snake_case_ , **snake_case_ )
        UpperCamelCase_: Tuple = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
        return text
| 670 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A__(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow T5 checkpoint into a saved PyTorch model.

    Bug fix: the original signature declared three parameters all named
    ``lowerCamelCase`` (a SyntaxError) and assigned results to ``UpperCamelCase_``
    while reading back ``config``/``model`` (NameError). Names restored so the
    function runs; positional call order (tf path, config, dump path) is unchanged.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint (.ckpt file).
        config_file: JSON file describing the pre-trained T5 architecture.
        pytorch_dump_path: output path for the converted PyTorch model.
    """
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file)
    print(F'''Building PyTorch model from configuration: {config}''')
    model = TaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    # Bug fix: parser/args were assigned to `lowerCamelCase_` but read back as
    # `parser`/`args`, raising NameError; bind the names actually used.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    # NOTE(review): `convert_tf_checkpoint_to_pytorch` is not defined in this file —
    # the converter above was renamed to ``A__`` — confirm the intended callee.
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 670 | 1 |
from __future__ import annotations
def __lowercase(sequence, start=None, end=None):
    """Sort ``sequence[start:end + 1]`` in place using the (deliberately
    inefficient) slowsort algorithm.

    Bug fix: the original signature declared three parameters all named
    ``snake_case`` (a SyntaxError), assigned locals to ``__magic_name__`` while
    reading back ``start``/``end``/``mid``/``sequence``, and recursed via the
    undefined name ``slowsort``. Names restored; behaviour is the canonical
    slowsort (sort each half, swap the max to the end, recurse on the rest).

    Args:
        sequence: mutable sequence to sort in place.
        start: first index of the range to sort (defaults to 0).
        end: last index of the range to sort (defaults to len(sequence) - 1).
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    __lowercase(sequence, start, mid)
    __lowercase(sequence, mid + 1, end)
    # Move the larger of the two sub-range maxima to the end of the range.
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    __lowercase(sequence, start, end - 1)
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 0 |
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__(snake_case__: int) -> bool:
    """Return True if ``snake_case__`` is 1-9 pandigital (uses each of the
    digits 1..9 exactly once).

    Bug fix: the original converted the number to a string but stored it in a
    throwaway local, then called ``len``/``set`` on the *int* argument, raising
    TypeError. The digit string is now actually used.
    """
    digits = str(snake_case__)
    return len(digits) == 9 and set(digits) == set("""123456789""")
def UpperCAmelCase__():
    """Return the largest 1-9 pandigital number formed as the concatenated
    product of an integer with (1, 2) or (1, 2, 3).

    For a 4-digit base ``b`` (5000..9999), ``100002 * b`` is the concatenation of
    ``b`` and ``2 * b``; for a 3-digit base (100..333), ``1002003 * b``
    concatenates ``b``, ``2 * b`` and ``3 * b``. Scanning bases downward returns
    the largest candidate first.

    Bug fix: the original called the undefined helper ``is_9_pandigital`` and
    assigned candidates to a dummy local while reading back ``candidate``
    (NameError). The pandigital check is inlined so the function is self-contained.
    """

    def _is_9_pandigital(num):
        # True iff num uses each digit 1..9 exactly once.
        s = str(num)
        return len(s) == 9 and set(s) == set("""123456789""")

    for base_num in range(99_99, 49_99, -1):
        candidate = 10_00_02 * base_num
        if _is_9_pandigital(candidate):
            return candidate
    for base_num in range(3_33, 99, -1):
        candidate = 1_00_20_03 * base_num
        if _is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this file — the search routine
    # above was renamed to ``UpperCAmelCase__`` (and that name is rebound by later
    # definitions) — so this line raises NameError as written; confirm the
    # intended entry point.
    print(F'''{solution() = }''')
| 609 | 0 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__lowercase : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__lowercase : List[str] = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class _A ( unittest.TestCase ):
    """Tests for the repo's ``check_copies`` utility: '# Copied from' consistency
    checking and localized-README model-list conversion.

    NOTE(review): identifiers in this block look machine-mangled — every test
    method is named ``lowercase`` (later defs shadow earlier ones, so only the
    last would run), every parameter is ``A_`` (duplicated within one signature,
    which is a SyntaxError), and locals are assigned to ``__snake_case`` while
    later statements read the original names (``comment``, ``class_name``, …).
    The final line also carries fused non-Python garbage (``| 93 | ...``).
    Restore the original identifiers before running these tests.
    """

    # setUp: copy modeling_bert.py into a temp "repo" layout for the checks.
    def lowercase ( self : List[str] ) -> List[str]:
        __snake_case = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir , '''models/bert/''' ) )
        __snake_case = self.transformer_dir
        shutil.copy(
            os.path.join(A_ , '''src/transformers/models/bert/modeling_bert.py''' ) , os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''' ) , )

    # tearDown: restore the real source dir and remove the temp copy.
    def lowercase ( self : Any ) -> int:
        __snake_case = '''src/transformers'''
        shutil.rmtree(self.transformer_dir )

    # Helper: write a candidate class to disk, black-format it, and assert that
    # check_copies either finds it consistent or overwrites it as expected.
    def lowercase ( self : str , A_ : List[str] , A_ : List[Any] , A_ : Optional[Any] , A_ : str=None ) -> int:
        __snake_case = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            __snake_case = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        __snake_case = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
        __snake_case = black.format_str(A_ , mode=A_ )
        __snake_case = os.path.join(self.transformer_dir , '''new_code.py''' )
        with open(A_ , '''w''' , newline='''\n''' ) as f:
            f.write(A_ )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(A_ ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=A_ )
            with open(A_ , '''r''' ) as f:
                self.assertTrue(f.read() , A_ )

    # Sanity check: find_code_in_transformers returns the reference snippet.
    def lowercase ( self : str ) -> Optional[int]:
        __snake_case = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' )
        self.assertEqual(A_ , A_ )

    # Exercises is_copy_consistent across the supported "# Copied from" variants.
    def lowercase ( self : int ) -> str:
        # Base copy consistency
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE + '''\n''' , )
        # With no empty line at the end
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , A_ , )
        # Copy consistency with rename
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , re.sub('''Bert''' , '''TestModel''' , A_ ) , )
        # Copy consistency with a really long name
        __snake_case = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" , f"{long_class_name}LMPredictionHead" , re.sub('''Bert''' , A_ , A_ ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , A_ , overwrite_result=re.sub('''Bert''' , '''TestModel''' , A_ ) , )

    # convert_to_localized_md: syncing an English model list into the zh-hans README.
    def lowercase ( self : Any ) -> str:
        __snake_case = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
        __snake_case = (
            '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
            ''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
            ''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
            ''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
            ''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
            ''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
            ''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
            ''' method has been applied to compress GPT2 into'''
            ''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
            ''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
            ''' Multilingual BERT into'''
            ''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
            ''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
            ''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
            ''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
            ''' Luong, Quoc V. Le, Christopher D. Manning.'''
        )
        __snake_case = (
            '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
            ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
            ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
            ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
        )
        __snake_case = (
            '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
            ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
            ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
            ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
            ''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
            ''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
            ''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
            ''' method has been applied to compress GPT2 into'''
            ''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
            ''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
            ''' Multilingual BERT into'''
            ''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
            ''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
            ''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
            ''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
            ''' Christopher D. Manning 发布。\n'''
        )
        __snake_case , __snake_case = check_copies.convert_to_localized_md(
            A_ , A_ , localized_readme['''format_model_list'''] )
        self.assertFalse(A_ )
        self.assertEqual(A_ , A_ )
        __snake_case , __snake_case = check_copies.convert_to_localized_md(
            A_ , A_ , localized_readme['''format_model_list'''] )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(A_ )
        __snake_case = (
            '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
            ''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
            ''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
            ''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
        )
        __snake_case = (
            '''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
            ''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
            ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
            ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
        )
        __snake_case = (
            '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
            ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
            ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
            ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
        )
        __snake_case , __snake_case = check_copies.convert_to_localized_md(
            A_ , A_ , localized_readme['''format_model_list'''] )
        # Check if the model link is synchronized.
        self.assertEqual(A_ , A_ ) | 93 | """simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _A ( unittest.TestCase ):
    """Tests for InstructBlipProcessor: round-tripping save/load, and checking
    that the processor delegates to its image processor, tokenizer and Q-Former
    tokenizer consistently.

    NOTE(review): identifiers in this block look machine-mangled — every test
    method is named ``lowercase`` (later defs shadow earlier ones), parameters
    are ``A_`` and locals ``__snake_case`` while later statements read the
    original names (``processor``, ``tokenizer``, …). The final line also carries
    fused non-Python garbage (``| 93 | 1 |``). Restore the original identifiers
    before running these tests.
    """

    # setUp: build a processor from tiny test models and save it to a temp dir.
    def lowercase ( self : str ) -> str:
        __snake_case = tempfile.mkdtemp()
        __snake_case = BlipImageProcessor()
        __snake_case = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
        __snake_case = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        __snake_case = InstructBlipProcessor(A_ , A_ , A_ )
        processor.save_pretrained(self.tmpdirname )

    # Reload just the tokenizer component from the saved processor.
    def lowercase ( self : Tuple , **A_ : str ) -> List[Any]:
        return AutoProcessor.from_pretrained(self.tmpdirname , **A_ ).tokenizer

    # Reload just the image-processor component.
    def lowercase ( self : Union[str, Any] , **A_ : Tuple ) -> Union[str, Any]:
        return AutoProcessor.from_pretrained(self.tmpdirname , **A_ ).image_processor

    # Reload just the Q-Former tokenizer component.
    def lowercase ( self : Union[str, Any] , **A_ : Tuple ) -> Dict:
        return AutoProcessor.from_pretrained(self.tmpdirname , **A_ ).qformer_tokenizer

    # tearDown: remove the temp dir created in setUp.
    def lowercase ( self : Optional[int] ) -> Optional[Any]:
        shutil.rmtree(self.tmpdirname )

    # Fixture: a single random RGB PIL image.
    def lowercase ( self : Optional[int] ) -> Tuple:
        __snake_case = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        __snake_case = [Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    # save_pretrained/from_pretrained round trip with overridden kwargs.
    def lowercase ( self : int ) -> Dict:
        __snake_case = InstructBlipProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
        processor.save_pretrained(self.tmpdirname )
        __snake_case = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        __snake_case = self.get_image_processor(do_normalize=A_ , padding_value=1.0 )
        __snake_case = InstructBlipProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=A_ , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , A_ )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , A_ )
        self.assertIsInstance(processor.qformer_tokenizer , A_ )

    # Image inputs through the processor match the raw image processor's output.
    def lowercase ( self : int ) -> str:
        __snake_case = self.get_image_processor()
        __snake_case = self.get_tokenizer()
        __snake_case = self.get_qformer_tokenizer()
        __snake_case = InstructBlipProcessor(
            tokenizer=A_ , image_processor=A_ , qformer_tokenizer=A_ )
        __snake_case = self.prepare_image_inputs()
        __snake_case = image_processor(A_ , return_tensors='''np''' )
        __snake_case = processor(images=A_ , return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    # Text inputs are encoded by both tokenizers (qformer_* prefixed for the 2nd).
    def lowercase ( self : List[str] ) -> Optional[int]:
        __snake_case = self.get_image_processor()
        __snake_case = self.get_tokenizer()
        __snake_case = self.get_qformer_tokenizer()
        __snake_case = InstructBlipProcessor(
            tokenizer=A_ , image_processor=A_ , qformer_tokenizer=A_ )
        __snake_case = '''lower newer'''
        __snake_case = processor(text=A_ )
        __snake_case = tokenizer(A_ , return_token_type_ids=A_ )
        __snake_case = qformer_tokenizer(A_ , return_token_type_ids=A_ )
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] )

    # Joint text+image call produces the full set of expected keys; empty call raises.
    def lowercase ( self : List[str] ) -> int:
        __snake_case = self.get_image_processor()
        __snake_case = self.get_tokenizer()
        __snake_case = self.get_qformer_tokenizer()
        __snake_case = InstructBlipProcessor(
            tokenizer=A_ , image_processor=A_ , qformer_tokenizer=A_ )
        __snake_case = '''lower newer'''
        __snake_case = self.prepare_image_inputs()
        __snake_case = processor(text=A_ , images=A_ )
        self.assertListEqual(
            list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
        # test if it raises when no input is passed
        with pytest.raises(A_ ):
            processor()

    # batch_decode delegates to the main tokenizer.
    def lowercase ( self : str ) -> Union[str, Any]:
        __snake_case = self.get_image_processor()
        __snake_case = self.get_tokenizer()
        __snake_case = self.get_qformer_tokenizer()
        __snake_case = InstructBlipProcessor(
            tokenizer=A_ , image_processor=A_ , qformer_tokenizer=A_ )
        __snake_case = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        __snake_case = processor.batch_decode(A_ )
        __snake_case = tokenizer.batch_decode(A_ )
        self.assertListEqual(A_ , A_ )

    # Model-input-names check: same key set as the joint call above.
    def lowercase ( self : int ) -> List[str]:
        __snake_case = self.get_image_processor()
        __snake_case = self.get_tokenizer()
        __snake_case = self.get_qformer_tokenizer()
        __snake_case = InstructBlipProcessor(
            tokenizer=A_ , image_processor=A_ , qformer_tokenizer=A_ )
        __snake_case = '''lower newer'''
        __snake_case = self.prepare_image_inputs()
        __snake_case = processor(text=A_ , images=A_ )
        self.assertListEqual(
            list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , ) | 93 | 1 |
import random
def A__(vertices_number, probability, directed=False):
    """Generate a random graph (adjacency-list dict) on ``vertices_number``
    vertices, including each possible edge independently with ``probability``.

    Bug fix: the original signature declared three parameters all named
    ``__lowerCAmelCase`` (a SyntaxError) and read back the original local names
    (``graph``, ``probability``, ``directed``) that were never bound; it also
    called the undefined helper ``complete_graph``. Names restored and the
    complete-graph fallback inlined so the function is self-contained.

    Args:
        vertices_number: number of vertices (labelled 0 .. n-1).
        probability: per-edge inclusion probability.
        directed: if False (default), every added edge is mirrored so the
            adjacency lists are symmetric.

    Returns:
        dict mapping each vertex to the list of its neighbours.
    """
    graph = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return {
            i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
        }
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add the reverse edge as well
                    graph[j].append(i)
    return graph
def A__(__lowerCAmelCase):
    """Return the complete graph on ``__lowerCAmelCase`` vertices as an
    adjacency-list dict: every vertex is connected to every other vertex."""
    graph = {}
    for node in range(__lowerCAmelCase):
        graph[node] = [other for other in range(__lowerCAmelCase) if other != node]
    return graph
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod

    testmod()
| 304 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class lowercase ( a_ ):
    """Agent tool that transcribes audio to text with Whisper
    (encode -> generate -> batch_decode pipeline).

    NOTE(review): identifiers here look machine-mangled — the base-class name
    ``a_`` is undefined in this file (``PipelineTool`` is imported above and is
    presumably the intended base), every class attribute is named
    ``_UpperCamelCase`` and every method ``__UpperCAmelCase``, so later
    definitions shadow earlier ones. Restore the original names before use.
    """

    _UpperCamelCase : Optional[Any] = "openai/whisper-base"
    _UpperCamelCase : List[Any] = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    _UpperCamelCase : Union[str, Any] = "transcriber"
    _UpperCamelCase : Tuple = WhisperProcessor
    _UpperCamelCase : Optional[Any] = WhisperForConditionalGeneration
    _UpperCamelCase : Union[str, Any] = ["audio"]
    _UpperCamelCase : Any = ["text"]

    def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : Optional[int] ):
        """Pre-process: convert raw audio into Whisper input features."""
        return self.pre_processor(lowerCamelCase_ , return_tensors='pt' ).input_features

    def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Any ):
        """Forward: autoregressively generate token ids from the input features."""
        return self.model.generate(inputs=lowerCamelCase_ )

    def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : Tuple ):
        """Decode: turn the generated ids into the transcript string."""
        return self.pre_processor.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )[0]
| 304 | 1 |
def UpperCAmelCase__(n, prices):
    """Naive exponential-time recursive solution to the rod-cutting problem:
    maximum revenue obtainable from a rod of length ``n`` given ``prices[i-1]``
    for a piece of length ``i``.

    Bug fix: the original declared two parameters both named ``lowerCamelCase_``
    (a SyntaxError), recursed via the undefined name
    ``naive_cut_rod_recursive`` and called the undefined validator
    ``_enforce_args``. Names restored and the argument checks inlined so the
    function is self-contained.
    """
    # Argument validation (inlined from the lost ``_enforce_args`` helper).
    if n < 0:
        raise ValueError(f"n must be greater than or equal to 0. Got n = {n}")
    if n > len(prices):
        raise ValueError(
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
    if n == 0:
        return 0
    max_revue = float("-inf")
    # Try every first-cut length i and recurse on the remainder.
    for i in range(1, n + 1):
        max_revue = max(max_revue, prices[i - 1] + UpperCAmelCase__(n - i, prices))
    return max_revue
def UpperCAmelCase__(n, prices):
    """Top-down (memoized) dynamic-programming solution to the rod-cutting
    problem: maximum revenue for a rod of length ``n`` given ``prices``.

    Bug fix: the original declared two parameters both named ``lowerCamelCase_``
    (a SyntaxError) and called the undefined helpers ``_enforce_args`` and
    ``_top_down_cut_rod_recursive``. The validation and the memoized recursion
    are inlined so the function is self-contained; behaviour is unchanged.
    """
    # Argument validation (inlined from the lost ``_enforce_args`` helper).
    if n < 0:
        raise ValueError(f"n must be greater than or equal to 0. Got n = {n}")
    if n > len(prices):
        raise ValueError(
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
    # max_rev[m] caches the best revenue for length m; -inf means "not computed".
    max_rev = [float("-inf") for _ in range(n + 1)]

    def _cut(m):
        # Memoized recursion over the remaining rod length m.
        if max_rev[m] >= 0:
            return max_rev[m]
        if m == 0:
            return 0
        best = float("-inf")
        for i in range(1, m + 1):
            best = max(best, prices[i - 1] + _cut(m - i))
        max_rev[m] = best
        return best

    return _cut(n)
def UpperCAmelCase__(n, prices, max_rev):
    """Memoized recursion for top-down rod cutting: returns the best revenue for
    length ``n``, caching results in ``max_rev`` (-inf marks "not computed").

    Bug fix: the original declared three parameters all named ``lowerCamelCase_``
    (a SyntaxError), stored intermediate results in a dummy local while reading
    back ``max_revenue``, and recursed via the undefined name
    ``_top_down_cut_rod_recursive``. Names restored; the recursion now targets
    this function itself.
    """
    if max_rev[n] >= 0:
        # Already computed for this length.
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue, prices[i - 1] + UpperCAmelCase__(n - i, prices, max_rev)
            )
        max_rev[n] = max_revenue
        return max_rev[n]
def UpperCAmelCase__(n, prices):
    """Bottom-up dynamic-programming solution to the rod-cutting problem:
    maximum revenue for a rod of length ``n`` given ``prices``.

    Bug fix: the original declared two parameters both named ``lowerCamelCase_``
    (a SyntaxError), called the undefined validator ``_enforce_args`` and
    assigned every intermediate to a dummy local while reading back ``max_rev``/
    ``max_revenue_i``. Names restored and the validation inlined.
    """
    # Argument validation (inlined from the lost ``_enforce_args`` helper).
    if n < 0:
        raise ValueError(f"n must be greater than or equal to 0. Got n = {n}")
    if n > len(prices):
        raise ValueError(
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a
    # rod of length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]
def UpperCAmelCase__(n, prices):
    """Validate rod-cutting arguments: ``n`` must be non-negative and every
    piece length up to ``n`` must have a price.

    Bug fix: the original declared two parameters both named ``lowerCamelCase_``
    (a SyntaxError) and bound the error messages to a dummy local while raising
    with the original name. Names restored; the exact messages are preserved.

    Raises:
        ValueError: if ``n < 0`` or ``n > len(prices)``.
    """
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)
def UpperCAmelCase__ ( ) -> Union[str, Any]:
    # Demo driver: checks that the three rod-cutting implementations agree on a
    # sample price list.
    # NOTE(review): the names read below (``top_down_cut_rod``,
    # ``bottom_up_cut_rod``, ``naive_cut_rod_recursive``, ``prices``, ``n``, …)
    # are not defined in this file — the implementations above were all renamed
    # to ``UpperCAmelCase__`` and locals are bound to the dummy ``__a`` — so this
    # raises NameError as written. Restore the original identifiers before use.
    __a : Optional[Any] = [6, 1_0, 1_2, 1_5, 2_0, 2_3]
    __a : Optional[int] = len(lowerCamelCase__ )
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    __a : List[str] = 3_6
    __a : Tuple = top_down_cut_rod(lowerCamelCase__ , lowerCamelCase__ )
    __a : Dict = bottom_up_cut_rod(lowerCamelCase__ , lowerCamelCase__ )
    __a : List[Any] = naive_cut_rod_recursive(lowerCamelCase__ , lowerCamelCase__ )
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined in this file (the demo above is
    # bound to ``UpperCAmelCase__``) — this raises NameError as written.
    main()
| 721 |
def UpperCAmelCase__(input_string, pattern):
    """Return True if ``input_string`` fully matches ``pattern``, where ``.``
    matches any single character and ``*`` matches zero or more of the preceding
    element (bottom-up dynamic programming, LeetCode-10 style).

    Bug fix: the original declared two parameters both named ``lowerCamelCase_``
    (a SyntaxError) and assigned every intermediate to a dummy local while
    reading back ``input_string``/``pattern``/``dp``. Names restored; the DP
    recurrence is unchanged.
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # zero occurrences of the starred element
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # one more occurrence of the starred element
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Bug fix: the demo strings were assigned twice to the dummy name
    # ``SCREAMING_SNAKE_CASE__`` while the code below reads
    # ``input_string``/``pattern`` (NameError). Bind the names actually used.
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = '''aab'''
    pattern = '''c*a*b'''
    # using function to check whether given string matches the given pattern
    # NOTE(review): `match_pattern` is not defined in this file (the matcher
    # above was renamed to ``UpperCAmelCase__``) — confirm the intended callee.
    if match_pattern(input_string, pattern):
        print(F"{input_string} matches the given pattern {pattern}")
    else:
        print(F"{input_string} does not match with the given pattern {pattern}")
| 577 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class _a(PretrainedConfig):
    """Configuration class for MarkupLM models.

    Holds the standard transformer hyper-parameters plus the xpath-embedding
    sizes that are specific to markup-language (HTML/XML) inputs.

    Fixes applied: the base class was an undefined name (``PretrainedConfig``
    is what the file imports), every ``__init__`` parameter shared one name
    (a duplicate-argument SyntaxError), and the attribute assignments bound a
    single throwaway local instead of ``self.<attr>``.
    """

    # Identifier used by the auto classes to locate this config.
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1_024,
        tag_pad_id=216,
        subs_pad_id=1_001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # Special-token ids are consumed by the PretrainedConfig base class.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # Markup-specific (xpath) embedding hyper-parameters.
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 510 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__A : Union[str, Any] = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = ['LayoutLMv2FeatureExtractor']
__A : Union[str, Any] = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[Any] = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
__A : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 575 | 0 |
def __lowerCAmelCase(number: int) -> int:
    """Count the set bits (1-bits) in a non-negative integer.

    Uses Kernighan's trick: ``n &= n - 1`` clears the lowest set bit, so the
    loop runs once per set bit rather than once per bit position.

    Fixes applied: the type check previously called ``isinstance`` on an
    undefined name, and the accumulator was bound under a different name than
    the one that was incremented (both NameErrors).

    Raises:
        ValueError: If ``number`` is not an ``int`` or is negative.
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("""Input must be a non-negative integer""")
    count = 0
    while number:
        # Jump straight to the next set bit instead of scanning every bit.
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 720 | from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class SCREAMING_SNAKE_CASE__(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-test harness for the TF MobileBERT model family.

    Fixes applied: the base classes were undefined names (the file imports
    ``TFModelTesterMixin`` and ``PipelineTesterMixin``), the four class
    attributes were all bound to one name ``A`` (each clobbering the previous),
    ``_prepare_for_class`` repeated a single parameter name (SyntaxError) and
    dropped the dict-key assignment, and ``tf.intaa`` is not a TF dtype.
    """

    # Model classes exercised by the shared TF model tests.
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline task -> model class mapping for the pipeline tests.
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Extend the common input dict with labels required by MobileBERT heads."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                # Pretraining heads additionally expect a next-sentence label.
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
    # NOTE(review): the base name __UpperCamelCase is not defined in this
    # module's visible imports -- as written the class statement would raise
    # NameError; verify the intended base class.
    # NOTE(review): every method below is named snake_case__.  In a class body
    # later definitions silently shadow earlier ones, so only the last method
    # bound here survives at runtime -- the intended distinct names need to be
    # restored before any of these helpers/tests can run.
    # NOTE(review): this class appears to merge a model-tester helper (the
    # ``parent``-style __init__ and create_and_check_* methods) with
    # unittest-style test methods (setUp/config_tester usage) -- confirm the
    # intended two-class structure.
    def __init__( self : Any , _lowerCAmelCase : int , _lowerCAmelCase : int=13 , _lowerCAmelCase : int=7 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Any=True , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Any=True , _lowerCAmelCase : str=99 , _lowerCAmelCase : Any=32 , _lowerCAmelCase : List[Any]=32 , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : Union[str, Any]=4 , _lowerCAmelCase : Any=37 , _lowerCAmelCase : str="gelu" , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : Dict=5_12 , _lowerCAmelCase : str=16 , _lowerCAmelCase : Union[str, Any]=2 , _lowerCAmelCase : Dict=0.02 , _lowerCAmelCase : Tuple=3 , _lowerCAmelCase : Dict=4 , _lowerCAmelCase : Tuple=None , ):
        # NOTE(review): the parameter name _lowerCAmelCase is repeated --
        # duplicate argument names in a def are a SyntaxError in Python.
        # NOTE(review): every assignment below rebinds the single local
        # __snake_case instead of setting self.<attr>, while later methods read
        # self.batch_size etc. -- the attribute assignments were lost.
        __snake_case : int = parent
        __snake_case : Union[str, Any] = batch_size
        __snake_case : Dict = seq_length
        __snake_case : Optional[int] = is_training
        __snake_case : str = use_input_mask
        __snake_case : Optional[Any] = use_token_type_ids
        __snake_case : Dict = use_labels
        __snake_case : Any = vocab_size
        __snake_case : List[str] = hidden_size
        __snake_case : Union[str, Any] = num_hidden_layers
        __snake_case : Dict = num_attention_heads
        __snake_case : Union[str, Any] = intermediate_size
        __snake_case : Optional[Any] = hidden_act
        __snake_case : str = hidden_dropout_prob
        __snake_case : Optional[Any] = attention_probs_dropout_prob
        __snake_case : Any = max_position_embeddings
        __snake_case : List[str] = type_vocab_size
        __snake_case : Tuple = type_sequence_label_size
        __snake_case : Dict = initializer_range
        __snake_case : List[str] = num_labels
        __snake_case : str = num_choices
        __snake_case : Optional[int] = scope
        __snake_case : Any = embedding_size

    def snake_case__ ( self : str ):
        # Builds a small random config plus input tensors for the checks below.
        __snake_case : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __snake_case : Dict = None
        if self.use_input_mask:
            __snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
        __snake_case : Optional[Any] = None
        if self.use_token_type_ids:
            __snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __snake_case : str = None
        __snake_case : Tuple = None
        __snake_case : int = None
        if self.use_labels:
            __snake_case : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __snake_case : str = ids_tensor([self.batch_size] , self.num_choices )
        __snake_case : int = MobileBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def snake_case__ ( self : Dict , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ):
        # Checks the bare TFMobileBertModel output shapes.
        __snake_case : Optional[int] = TFMobileBertModel(config=_lowerCAmelCase )
        __snake_case : int = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        __snake_case : Optional[Any] = model(_lowerCAmelCase )
        __snake_case : Tuple = [input_ids, input_mask]
        __snake_case : Tuple = model(_lowerCAmelCase )
        __snake_case : int = model(_lowerCAmelCase )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def snake_case__ ( self : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : List[str] ):
        # Masked-LM head: logits over the vocabulary per position.
        __snake_case : Union[str, Any] = TFMobileBertForMaskedLM(config=_lowerCAmelCase )
        __snake_case : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        __snake_case : List[str] = model(_lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ):
        # Next-sentence-prediction head: binary logits.
        __snake_case : str = TFMobileBertForNextSentencePrediction(config=_lowerCAmelCase )
        __snake_case : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        __snake_case : str = model(_lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )

    def snake_case__ ( self : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] ):
        # Pretraining head: MLM logits plus seq-relationship logits.
        __snake_case : Any = TFMobileBertForPreTraining(config=_lowerCAmelCase )
        __snake_case : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        __snake_case : Optional[int] = model(_lowerCAmelCase )
        self.parent.assertEqual(
            result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )

    def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
        # Sequence-classification head: one logit per label.
        __snake_case : List[str] = self.num_labels
        __snake_case : Optional[int] = TFMobileBertForSequenceClassification(config=_lowerCAmelCase )
        __snake_case : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        __snake_case : Optional[int] = model(_lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def snake_case__ ( self : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict ):
        # Multiple-choice head: inputs tiled across the choice dimension.
        __snake_case : Any = self.num_choices
        __snake_case : List[str] = TFMobileBertForMultipleChoice(config=_lowerCAmelCase )
        __snake_case : Dict = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
        __snake_case : Tuple = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
        __snake_case : int = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
        __snake_case : Dict = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        __snake_case : int = model(_lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def snake_case__ ( self : List[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] ):
        # Token-classification head: one logit per token per label.
        __snake_case : List[str] = self.num_labels
        __snake_case : Any = TFMobileBertForTokenClassification(config=_lowerCAmelCase )
        __snake_case : int = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        __snake_case : List[str] = model(_lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def snake_case__ ( self : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ):
        # Question-answering head: start/end logits per position.
        __snake_case : Tuple = TFMobileBertForQuestionAnswering(config=_lowerCAmelCase )
        __snake_case : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        __snake_case : Tuple = model(_lowerCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def snake_case__ ( self : Optional[Any] ):
        # Repackages prepare_config_and_inputs() output for the common tests.
        __snake_case : Union[str, Any] = self.prepare_config_and_inputs()
        (
            (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) ,
        ) : Dict = config_and_inputs
        __snake_case : List[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict

    def snake_case__ ( self : int ):
        # NOTE(review): TFMobileBertModelTest (and its nested
        # TFMobileBertModelTester) is not defined in this module -- verify; as
        # written this lookup would raise NameError.
        __snake_case : Optional[Any] = TFMobileBertModelTest.TFMobileBertModelTester(self )
        __snake_case : int = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 )

    def snake_case__ ( self : List[str] ):
        self.config_tester.run_common_tests()

    def snake_case__ ( self : str ):
        __snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*_lowerCAmelCase )

    def snake_case__ ( self : Tuple ):
        __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*_lowerCAmelCase )

    def snake_case__ ( self : Any ):
        __snake_case : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_lowerCAmelCase )

    def snake_case__ ( self : int ):
        __snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_lowerCAmelCase )

    def snake_case__ ( self : Any ):
        __snake_case : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*_lowerCAmelCase )

    def snake_case__ ( self : Dict ):
        __snake_case : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*_lowerCAmelCase )

    def snake_case__ ( self : Any ):
        __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_lowerCAmelCase )

    def snake_case__ ( self : int ):
        __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*_lowerCAmelCase )

    @slow
    def snake_case__ ( self : Tuple ):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            __snake_case : Dict = TFMobileBertModel.from_pretrained(_lowerCAmelCase )
            self.assertIsNotNone(_lowerCAmelCase )
@require_tf
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Integration test: run pretrained TF MobileBERT and compare logits.

    Fixes applied: the method was renamed ``test_*`` so unittest discovers it,
    and the locals (model, input_ids, output, ...) were previously all bound to
    one throwaway name while the code read undefined names (NameError).
    """

    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 3_05_22]
        self.assertEqual(output.shape, expected_shape)
        # Reference logits for the first 3 positions x first 3 vocab entries.
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 390 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Submodule name -> public names it provides.  Extended below depending on
# which optional backends are installed.
# Fixes applied: the dict and all the per-backend lists were previously bound
# to one throwaway name (so the optional entries were never registered), and
# the final _LazyModule(...) call referenced the undefined name
# ``_import_structure`` and discarded the lazy-module proxy.
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_albert"] = [
        "ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AlbertForMaskedLM",
        "AlbertForMultipleChoice",
        "AlbertForPreTraining",
        "AlbertForQuestionAnswering",
        "AlbertForSequenceClassification",
        "AlbertForTokenClassification",
        "AlbertModel",
        "AlbertPreTrainedModel",
        "load_tf_weights_in_albert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_albert"] = [
        "TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAlbertForMaskedLM",
        "TFAlbertForMultipleChoice",
        "TFAlbertForPreTraining",
        "TFAlbertForQuestionAnswering",
        "TFAlbertForSequenceClassification",
        "TFAlbertForTokenClassification",
        "TFAlbertMainLayer",
        "TFAlbertModel",
        "TFAlbertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_albert"] = [
        "FlaxAlbertForMaskedLM",
        "FlaxAlbertForMultipleChoice",
        "FlaxAlbertForPreTraining",
        "FlaxAlbertForQuestionAnswering",
        "FlaxAlbertForSequenceClassification",
        "FlaxAlbertForTokenClassification",
        "FlaxAlbertModel",
        "FlaxAlbertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static-analysis-only imports mirroring _import_structure above.
    from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_albert import AlbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_albert_fast import AlbertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_albert import (
            ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForPreTraining,
            AlbertForQuestionAnswering,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertModel,
            AlbertPreTrainedModel,
            load_tf_weights_in_albert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_albert import (
            TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAlbertForMaskedLM,
            TFAlbertForMultipleChoice,
            TFAlbertForPreTraining,
            TFAlbertForQuestionAnswering,
            TFAlbertForSequenceClassification,
            TFAlbertForTokenClassification,
            TFAlbertMainLayer,
            TFAlbertModel,
            TFAlbertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_albert import (
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForPreTraining,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertModel,
            FlaxAlbertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so nothing heavy loads eagerly.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
# Fixes applied: these four constants were all bound to the single name
# ``lowercase__`` (each clobbering the previous one) while the tokenizer class
# below references the canonical names restored here.

# Asset file names expected by the slow tokenizer.
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

# Checkpoint name -> hosted vocabulary / tokenizer files.
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
    },
    '''tokenizer_file''': {
        '''unc-nlp/lxmert-base-uncased''': (
            '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
        ),
    },
}

# Maximum model input length per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''unc-nlp/lxmert-base-uncased''': 5_12,
}

# Per-checkpoint tokenizer initialization overrides.
PRETRAINED_INIT_CONFIGURATION = {
    '''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}
class _UpperCAmelCase(PreTrainedTokenizerFast):
    """Fast (Rust-backed) LXMERT tokenizer — WordPiece, BERT-style.

    Fixes applied: the base class was an undefined name (the file imports
    ``PreTrainedTokenizerFast``), the class attributes were all bound to one
    name, ``__init__`` and the helper methods repeated a single parameter name
    (duplicate-argument SyntaxError), and the three methods all shared one name
    so only the last definition would have survived.  Method and attribute
    names are restored to the identifiers the base-class contract dispatches
    on.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer when the serialized tokenizer state
        # disagrees with the arguments passed here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''', do_lower_case) != do_lower_case
            or normalizer_state.get('''strip_accents''', strip_accents) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('''type'''))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``[CLS] A [SEP]`` (plus ``B [SEP]`` when a pair is given)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return segment ids: 0 for the first sequence, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Persist the backend tokenizer's vocabulary files to ``save_directory``."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 123 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Submodule name -> public names it provides.  Extended below depending on
# which optional backends are installed.
# Fixes applied: the dict and the per-backend lists were previously bound to
# one throwaway name (so the optional entries were never registered), and the
# final _LazyModule(...) call referenced the undefined name
# ``_import_structure`` and discarded the lazy-module proxy.
_import_structure = {
    '''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''],
    '''tokenization_lxmert''': ['''LxmertTokenizer'''],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''tokenization_lxmert_fast'''] = ['''LxmertTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_lxmert'''] = [
        '''LxmertEncoder''',
        '''LxmertForPreTraining''',
        '''LxmertForQuestionAnswering''',
        '''LxmertModel''',
        '''LxmertPreTrainedModel''',
        '''LxmertVisualFeatureEncoder''',
        '''LxmertXLayer''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_lxmert'''] = [
        '''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFLxmertForPreTraining''',
        '''TFLxmertMainLayer''',
        '''TFLxmertModel''',
        '''TFLxmertPreTrainedModel''',
        '''TFLxmertVisualFeatureEncoder''',
    ]

if TYPE_CHECKING:
    # Static-analysis-only imports mirroring _import_structure above.
    from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
    from .tokenization_lxmert import LxmertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_lxmert_fast import LxmertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lxmert import (
            LxmertEncoder,
            LxmertForPreTraining,
            LxmertForQuestionAnswering,
            LxmertModel,
            LxmertPreTrainedModel,
            LxmertVisualFeatureEncoder,
            LxmertXLayer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_lxmert import (
            TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLxmertForPreTraining,
            TFLxmertMainLayer,
            TFLxmertModel,
            TFLxmertPreTrainedModel,
            TFLxmertVisualFeatureEncoder,
        )

else:
    import sys

    # Replace this module with a lazy proxy so nothing heavy loads eagerly.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 96 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
# Fixes applied: all four bindings below were previously assigned to the single
# name ``_lowercase``, so ``NLTK_VERSION`` (read two lines later and inside the
# metric class) and the docstring constants used by the decorator were
# undefined (NameError).

# nltk's version gates which tokenization path the metric takes.
NLTK_VERSION = version.parse(importlib_metadata.version('''nltk'''))
if NLTK_VERSION >= version.Version('''3.6.4'''):
    from nltk import word_tokenize

_CITATION = '''\
@inproceedings{banarjee2005,
  title     = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
  author    = {Banerjee, Satanjeev  and  Lavie, Alon},
  booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
  month     = jun,
  year      = {2005},
  address   = {Ann Arbor, Michigan},
  publisher = {Association for Computational Linguistics},
  url       = {https://www.aclweb.org/anthology/W05-0909},
  pages     = {65--72},
}
'''

_DESCRIPTION = '''\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.

METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
'''

_KWARGS_DESCRIPTION = '''
Computes METEOR score of translated segments against one or more references.
Args:
    predictions: list of predictions to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of reference for each prediction. Each
        reference should be a string with tokens separated by spaces.
    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
    gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
    'meteor': meteor score.
Examples:

    >>> meteor = datasets.load_metric('meteor')
    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
    >>> results = meteor.compute(predictions=predictions, references=references)
    >>> print(round(results["meteor"], 4))
    0.6944
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __A(datasets.Metric):
    """METEOR machine-translation metric, wrapping nltk's ``meteor_score``.

    Fixes applied: the three methods all shared one name (so only the last
    definition survived — and ``datasets.Metric`` dispatches to
    ``_info``/``_download_and_prepare``/``_compute``), and ``_compute``
    repeated a single parameter name (duplicate-argument SyntaxError) while
    its body read undefined names.
    """

    def _info(self):
        # Metric metadata: input schema plus citation / reference links.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""", id="""sequence"""),
                    """references""": datasets.Value("""string""", id="""sequence"""),
                }
            ),
            codebase_urls=["""https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"""],
            reference_urls=[
                """https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score""",
                """https://en.wikipedia.org/wiki/METEOR""",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        # METEOR needs WordNet data; newer nltk also needs punkt and omw-1.4.
        import nltk

        nltk.download("""wordnet""")
        if NLTK_VERSION >= version.Version("""3.6.5"""):
            nltk.download("""punkt""")
        if NLTK_VERSION >= version.Version("""3.6.6"""):
            nltk.download("""omw-1.4""")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        """Score each prediction against its reference and return the mean."""
        if NLTK_VERSION >= version.Version("""3.6.5"""):
            # nltk >= 3.6.5 expects pre-tokenized input.
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        return {"meteor": np.mean(scores)}
| 96 | 1 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for LXMERT (WordPiece-based, same vocab format as BERT).

    Method names (`setUp`, `get_input_output_texts`, `test_*`) follow the
    TokenizerTesterMixin / unittest discovery contract.
    """

    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        # Minimal WordPiece vocab covering the fixtures below.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        """Return a (raw, expected-after-decode) text pair used by the mixin."""
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        # Slow and fast tokenizers must agree on tokens and on ids.
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
| 364 |
def infix_2_postfix(infix):
    """Convert an infix expression string to postfix (RPN) via shunting-yard.

    Operands are single alphanumeric characters. Prints a step-by-step trace
    table to stdout and returns the postfix expression as a string.
    """
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                # stop at "(" — it has no entry in `priority` and marks a group
                while len(stack) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str
def infix_2_prefix(infix):
    """Convert an infix expression to prefix (Polish) notation.

    Reverses the input, swaps parentheses, runs infix_2_postfix, then
    reverses the result.
    """
    reversed_infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(reversed_infix)):
        if reversed_infix[i] == "(":
            reversed_infix[i] = ")"  # change "(" to ")"
        elif reversed_infix[i] == ")":
            reversed_infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(reversed_infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 364 | 1 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

# 0 are free path whereas 1's are obstacles
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

# A board position as (y, x)
TPosition = tuple[int, int]
class Node:
    """A search node: position, link to its parent, and A* costs.

    f_cost = g_cost (steps from start) + h_cost (heuristic to goal).
    """

    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Manhattan distance if HEURISTIC == 1, else Euclidean distance."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other) -> bool:
        # Ordering by total estimated cost lets open lists be sorted directly.
        return self.f_cost < other.f_cost
class AStar:
    """A* search over the module-level ``grid`` using ``delta`` moves.

    ``search`` returns the list of (y, x) positions from start to goal,
    or ``[start.pos]`` if no path exists.
    """

    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []

        self.reached = False

    def search(self):
        """Expand the cheapest open node until the target position is reached."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__ (by f_cost)
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        # Goal unreachable: degenerate path containing only the start.
        return [self.start.pos]

    def get_successors(self, parent):
        """Return in-bounds, non-obstacle neighbour nodes of ``parent``."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    # NOTE(review): y/x passed as goal_x/goal_y here mirrors the
                    # original argument order; harmless for symmetric goals but
                    # looks swapped — confirm before relying on asymmetric goals.
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node):
        """Walk parent links back from ``node`` and return the start→node path."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Run two A* searches (start→goal and goal→start) that meet in the middle."""

    def __init__(self, start, goal) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self):
        """Alternate forward/backward expansions until the frontiers meet."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # Each search aims for the other search's current frontier node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node)
                        )

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        # No meeting point found: degenerate path containing only the start.
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node, bwd_node):
        """Join the forward path with the reversed backward path (dropping the
        duplicated meeting node)."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 276 |
'''simple docstring'''
def solution(n: int = 4_000_000) -> int:
    """Return the sum of all even-valued Fibonacci terms not exceeding ``n``.

    Project Euler problem 2: iterate the Fibonacci sequence, collecting
    the even terms while they stay <= n.
    """
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
| 276 | 1 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
# Module-wide logging configuration; `logger` is used throughout main().
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    """Tokenize a text dump once and pickle the resulting id sequences.

    Reads one example per line from ``--file_path``, wraps each in the
    tokenizer's BOS/SEP tokens, encodes it, and dumps the shuffled list of
    numpy id arrays to ``<dump_file>.<tokenizer_name>.pickle``.
    """
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    # Special tokens are added manually via bos/sep, so disable automatic ones.
    count = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        count += 1
        if count % interval == 0:
            end = time.time()
            logger.info(f"{count} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # uint16 halves the on-disk size when the vocab fits in 16 bits.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
| 344 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
# Lazy-module boilerplate: only import the tokenizer when actually accessed.
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 344 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter-name prefixes -> HF UniSpeech parameter names.
# "*" is a placeholder for the encoder layer index.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
# Keys that live at the top level of the HF model (not under "unispeech.").
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    """Copy one fairseq tensor into the HF model at dotted path ``key``.

    ``weight_type`` selects which attribute of the resolved module receives
    the value (weight/weight_g/weight_v/bias), or the module's own ``.data``
    when None. ``full_name`` is only used for logging/assert messages.
    """
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Map every tensor in the fairseq state dict onto the HF UniSpeech model.

    Conv feature-extractor tensors go through load_conv_layer; everything else
    is matched against MAPPING and copied via set_recursively. Unmatched
    tensors are collected and logged as a warning.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # The text before `key` ends with the layer index.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one conv feature-extractor tensor into the HF feature extractor.

    fairseq names look like ``...conv_layers.<layer_id>.<type_id>...`` where
    type_id 0 is the conv itself and type_id 2 a layer norm; anything else is
    recorded as unused.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # group norm only exists on layer 0; layer norm exists everywhere.
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Convert a fairseq UniSpeech checkpoint to the HF format.

    For fine-tuned models, also builds the CTC vocabulary, tokenizer, feature
    extractor and processor, and saves them alongside the model weights in
    ``pytorch_dump_folder_path``.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            # Layer-norm feature extractors were trained with attention masks.
            return_attention_mask = config.feat_extract_norm == "layer"
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
hf_unispeech.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 327 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: sub-module name -> public symbols. Optional backends
# (tokenizers / torch / tf / flax) are only registered when available.
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Direct imports for static type checkers; at runtime the lazy module below
    # is used instead.
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 327 | 1 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Return (cvt_name, original_name) rename pairs for stage ``idx``'s patch
    embedding: projection weight/bias and its normalization weight/bias."""
    embed = []
    embed.append(
        (
            f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
            f"stage{idx}.patch_embed.proj.weight",
        )
    )
    embed.append(
        (
            f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
            f"stage{idx}.patch_embed.proj.bias",
        )
    )
    embed.append(
        (
            f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
            f"stage{idx}.patch_embed.norm.weight",
        )
    )
    embed.append(
        (
            f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
            f"stage{idx}.patch_embed.norm.bias",
        )
    )
    return embed
def attention(idx, cnt):
    """Return ``(hf_key, original_key)`` rename pairs for attention block ``cnt``
    of stage ``idx`` of a CvT checkpoint.

    Fixes the mangled signature: both parameters were named
    ``SCREAMING_SNAKE_CASE_`` (a SyntaxError) while the body referenced the
    undefined names ``idx`` and ``cnt``.  The repetitive append list is folded
    into loops; the resulting pairs and their order are identical.
    """
    attention_weights = []
    hf_prefix = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    orig_prefix = f"stage{idx}.blocks.{cnt}"

    # Convolutional projections for query/key/value: one conv layer followed by
    # a batch-norm layer (including its running statistics).
    for hf_name, orig_name in (("query", "q"), ("key", "k"), ("value", "v")):
        attention_weights.append(
            (
                f"{hf_prefix}.attention.attention.convolution_projection_{hf_name}.convolution_projection.convolution.weight",
                f"{orig_prefix}.attn.conv_proj_{orig_name}.conv.weight",
            )
        )
        for bn_param in ("weight", "bias", "running_mean", "running_var", "num_batches_tracked"):
            attention_weights.append(
                (
                    f"{hf_prefix}.attention.attention.convolution_projection_{hf_name}.convolution_projection.normalization.{bn_param}",
                    f"{orig_prefix}.attn.conv_proj_{orig_name}.bn.{bn_param}",
                )
            )

    # Linear projections for query/key/value.
    for hf_name, orig_name in (("query", "q"), ("key", "k"), ("value", "v")):
        for param in ("weight", "bias"):
            attention_weights.append(
                (
                    f"{hf_prefix}.attention.attention.projection_{hf_name}.{param}",
                    f"{orig_prefix}.attn.proj_{orig_name}.{param}",
                )
            )

    # Attention output projection, MLP, and the two layer norms.
    for hf_name, orig_name in (
        ("attention.output.dense", "attn.proj"),
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for param in ("weight", "bias"):
            attention_weights.append(
                (f"{hf_prefix}.{hf_name}.{param}", f"{orig_prefix}.{orig_name}.{param}")
            )

    return attention_weights
def cls_token(idx):
    """Return the rename pair for the classification token of stage ``idx``.

    NOTE(review): the original-checkpoint key is hard-coded to ``stage2`` in the
    upstream conversion script — only the last CvT stage carries a cls token.
    """
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
    return token
def final():
    """Return rename pairs for the final layer norm and the classifier head."""
    head = []
    head.append(("layernorm.weight", "norm.weight"))
    head.append(("layernorm.bias", "norm.bias"))
    head.append(("classifier.weight", "head.weight"))
    head.append(("classifier.bias", "head.bias"))
    return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Convert an original CvT checkpoint to the Hugging Face format.

    Fixes the mangled original: the four parameters shared one name (a
    SyntaxError), every local was bound to ``_lowerCamelCase`` while the body
    read undefined names, and the ``CvtConfig`` kwargs were garbled
    (``idalabel``/``labelaid`` instead of ``id2label``/``label2id``).

    Args:
        cvt_model: Name of the CvT variant; the depth is parsed from it
            (e.g. ``cvt-13``, ``cvt-21``, wide ``cvt-w24``).
        image_size: Input image size to store in the image processor.
        cvt_file_name: Path to the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: Output directory for the converted model.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # The depth configuration is encoded in the model name.
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    # NOTE(review): the mangled original assigned ``image_size`` to an unnamed
    # local; presumably it configures the processor size — confirm upstream.
    image_processor.size["shortest_edge"] = image_size

    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()

    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    # Fixes the mangled original: the parser/args were bound to
    # SCREAMING_SNAKE_CASE__ while `parser.add_argument`/`args.*` were used
    # on undefined names.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        # Help text fixed: it was a copy-paste of the image-size help.
        help="Path to the original CvT checkpoint file.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 434 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    """Builds tiny CTRL configs and inputs, and runs per-model-class checks.

    Reconstructed from a mangled original: duplicate ``A_`` parameters (a
    SyntaxError), ``self.x = x`` assignments collapsed to a bare local, and all
    methods sharing the name ``lowercase_`` (so later defs shadowed earlier
    ones).  Names are restored from the call sites in the test class below
    (e.g. ``CTRLModelTester(self)``, ``prepare_config_and_inputs``).
    """

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # CTRL uses the last vocab id as padding token.
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        """Create a config plus random input tensors for one forward pass."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        """Return a tiny CTRLConfig matching the tester's hyper-parameters."""
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )

    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        """Adapter used by the common test mixins: config + kwargs dict."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
        return config, inputs_dict

    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test suite for the CTRL model family.

    Reconstructed from a mangled original: the class name duplicated the
    integration class below (so it was shadowed and never discovered), the
    mixin bases and the class attributes the mixins read
    (``all_model_classes`` etc.) were renamed, and both skipped tests shared
    the name ``lowercase_``.
    NOTE(review): the names of the two skipped tests are reconstructed from
    upstream transformers — confirm against the reference test file.
    """

    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    """Slow integration test: greedy generation with the pretrained ``ctrl`` checkpoint.

    Reconstructed from a mangled original: the class name duplicated the test
    class above, locals were bound to ``SCREAMING_SNAKE_CASE__``, and
    ``do_sample`` was passed an undefined name (greedy decoding — ``False`` —
    is required for the hard-coded expected ids to match).
    """

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 100 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
# Module-level logger (the bound name appears auto-mangled; conventionally `logger`).
lowerCamelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__(BaseImageProcessor):
    """ConvNeXT-style image processor: resize (with ``crop_pct`` below 384),
    rescale, and normalize.

    Reconstructed from a mangled original: every method declared duplicate
    parameters named ``a`` (a SyntaxError) and the base class ``__lowercase``
    was undefined — ``BaseImageProcessor`` (imported above) is the only base
    consistent with the body.
    NOTE(review): the class name itself looks auto-mangled (presumably
    ``ConvNextImageProcessor``); kept as-is to avoid breaking external
    references.
    """

    # Name expected by BaseImageProcessor for the produced batch key.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        crop_pct: Optional[float] = None,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample=PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image; below 384 px, resize shortest edge then center-crop."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Rescale pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        crop_pct: Optional[float] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Apply the configured resize/rescale/normalize pipeline to ``images``."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # Parenthesized: the original `a and b or c` raised even when
        # do_resize was False but resample was None (precedence bug).
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 202 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
lowerCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase__(DiffusionPipeline):
    """Pipeline for unconditional audio generation (dance-diffusion style).

    Reconstructed from a mangled original: ``__call__`` declared five duplicate
    parameters named ``a`` (a SyntaxError), the base class ``__lowercase`` was
    undefined, and every local was bound to ``_UpperCamelCase`` while the body
    read undefined names (``audio_length_in_s``, ``original_sample_size``,
    ``audio``).
    NOTE(review): the class name looks auto-mangled (presumably
    ``DanceDiffusionPipeline``); kept as-is to avoid breaking external
    references.
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator=None,
        audio_length_in_s=None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        """Generate ``batch_size`` audio samples.

        Args:
            batch_size: Number of audio samples to generate.
            num_inference_steps: Number of denoising steps.
            generator: Optional torch generator (or a list, one per batch item).
            audio_length_in_s: Requested audio length in seconds; defaults to the
                model's native ``sample_size / sample_rate``.
            return_dict: Return an ``AudioPipelineOutput`` instead of a tuple.
        """
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            # Round the length up to a multiple of the model's total
            # downsampling factor; the output is trimmed back afterwards.
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        # Trim the padding introduced by rounding up to the downscale factor.
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
| 202 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
snake_case = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a 2-D list of random floats of ``shape`` (rows, cols), scaled by ``scale``.

    Fixes the mangled signature: all four parameters shared the name
    ``lowerCAmelCase__`` (a SyntaxError) while the body referenced the
    undefined names ``shape``/``scale``/``rng``; the function is called as
    ``floats_list`` by the tester below.  ``name`` is accepted for interface
    compatibility but unused.
    """
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class WavaVecaFeatureExtractionTester(unittest.TestCase):
    """Builds Wav2Vec2 feature-extractor configs and batched speech inputs.

    Reconstructed from a mangled original: ``__init__`` declared duplicate
    ``_snake_case`` parameters (a SyntaxError), attribute assignments were
    collapsed to a bare local, both methods shared the name
    ``SCREAMING_SNAKE_CASE__``, and the class name ``__A`` collided with the
    test class below while the test's setUp references
    ``WavaVecaFeatureExtractionTester``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between consecutive input lengths so batch sizes span the range.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        """Return kwargs for constructing the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Create a batch of raw speech inputs (lists of floats or np arrays)."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
class __A ( snake_case__ ,unittest.TestCase ):
'''simple docstring'''
a_ = WavaVecaFeatureExtractor
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : List[str] = WavaVecaFeatureExtractionTester(self )
def SCREAMING_SNAKE_CASE__ ( self , _snake_case ):
self.assertTrue(np.all(np.mean(_snake_case , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(_snake_case , axis=0 ) - 1 ) < 1E-3 ) )
def SCREAMING_SNAKE_CASE__ ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
_lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCAmelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase : List[str] = [np.asarray(_snake_case ) for speech_input in speech_inputs]
# Test not batched input
_lowerCAmelCase : Union[str, Any] = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
_lowerCAmelCase : Optional[int] = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# Test batched
_lowerCAmelCase : int = feat_extract(_snake_case , return_tensors="np" ).input_values
_lowerCAmelCase : Dict = feat_extract(_snake_case , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(_snake_case , _snake_case ):
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_lowerCAmelCase : int = np.asarray(_snake_case )
_lowerCAmelCase : Tuple = feat_extract(_snake_case , return_tensors="np" ).input_values
_lowerCAmelCase : Any = feat_extract(_snake_case , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(_snake_case , _snake_case ):
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase : List[Any] = ["longest", "max_length", "do_not_pad"]
_lowerCAmelCase : Union[str, Any] = [None, 1600, None]
for max_length, padding in zip(_snake_case , _snake_case ):
_lowerCAmelCase : int = feat_extract(_snake_case , padding=_snake_case , max_length=_snake_case , return_tensors="np" )
_lowerCAmelCase : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : Optional[Any] = range(800 , 1400 , 200 )
_lowerCAmelCase : List[Any] = [floats_list((1, x) )[0] for x in lengths]
_lowerCAmelCase : int = ["longest", "max_length", "do_not_pad"]
_lowerCAmelCase : Optional[int] = [None, 1600, None]
for max_length, padding in zip(_snake_case , _snake_case ):
_lowerCAmelCase : Union[str, Any] = feat_extract(_snake_case , max_length=_snake_case , padding=_snake_case )
_lowerCAmelCase : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase : Optional[int] = feat_extract(
_snake_case , truncation=_snake_case , max_length=1000 , padding="max_length" , return_tensors="np" )
_lowerCAmelCase : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase : Optional[Any] = feat_extract(
_snake_case , truncation=_snake_case , max_length=1000 , padding="longest" , return_tensors="np" )
_lowerCAmelCase : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
_lowerCAmelCase : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase : Union[str, Any] = feat_extract(
_snake_case , truncation=_snake_case , max_length=2000 , padding="longest" , return_tensors="np" )
_lowerCAmelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self ):
import torch
_lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : Union[str, Any] = np.random.rand(100 ).astype(np.floataa )
_lowerCAmelCase : List[Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCAmelCase : Optional[int] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
_lowerCAmelCase : List[str] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def SCREAMING_SNAKE_CASE__ ( self ):
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
_lowerCAmelCase : List[str] = WavaVecaConfig.from_pretrained(_snake_case )
_lowerCAmelCase : int = WavaVecaFeatureExtractor.from_pretrained(_snake_case )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == "layer" )
from ..utils import is_flax_available, is_torch_available

if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_ad import DualTransformeraDModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .ta_film_transformer import TaFilmDecoder
    from .transformer_ad import TransformeraDModel
    # NOTE(review): the two imports below each appeared twice verbatim; the exact
    # duplicates were removed. Upstream these were likely distinct 1D/2D model
    # variants that got collapsed to one spelling — confirm module names.
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_ad_condition_flax import FlaxUNetaDConditionModel
    from .vae_flax import FlaxAutoencoderKL
"""simple docstring"""
def apply_table(inp, table):
    """Permute/select characters of *inp* using the 1-indexed positions in *table*.

    Renamed from the shadowed `__SCREAMING_SNAKE_CASE`: every call site in this
    file (`apply_table(...)`) uses this name, which was previously undefined.
    """
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


__SCREAMING_SNAKE_CASE = apply_table  # backward-compat alias for the old name
def left_shift(data):
    """Rotate the string *data* left by one position.

    Renamed from the shadowed `__SCREAMING_SNAKE_CASE` to match its call sites.
    """
    return data[1:] + data[0]


__SCREAMING_SNAKE_CASE = left_shift  # backward-compat alias for the old name
def xor(a, b):
    """Bitwise XOR of two equal-length bit strings, returned as a bit string.

    Renamed from the shadowed `__SCREAMING_SNAKE_CASE` to match its call sites.
    """
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


__SCREAMING_SNAKE_CASE = xor  # backward-compat alias for the old name
def apply_sbox(s, data):
    """Apply S-box *s* to a 4-bit string: outer bits select the row, inner bits the column.

    Returns the selected entry as an (unpadded) binary string. Renamed from the
    shadowed `__SCREAMING_SNAKE_CASE` to match its call sites.
    """
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


__SCREAMING_SNAKE_CASE = apply_sbox  # backward-compat alias for the old name
def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES.

    Expands and XORs the right half with *key*, substitutes through the two
    S-boxes, permutes with P4 and XORs onto the left half. Renamed from the
    shadowed `__SCREAMING_SNAKE_CASE` to match its call sites.
    """
    # P4 permutation; kept local so the round function is self-contained
    # (the mangled original passed an ambiguous parameter here).
    p4_table = [2, 4, 3, 1]
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741  left-pad S-box output to 2 bits
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


__SCREAMING_SNAKE_CASE = function  # backward-compat alias for the old name
if __name__ == "__main__":
    # All module-level variables previously shared one mangled name, so every
    # later reference (key, message, paa tables, ...) was undefined. Distinct
    # names restored from the assignment order; the data values are unchanged.
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]  # swap halves between rounds
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption (subkeys applied in reverse order)
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decypting is:", PT)
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
# NOTE(review): this binding is immediately shadowed by the URL map below because
# both statements assign to the same (mangled) name — confirm the intended
# distinct names (upstream convention: `logger` and an archive map).
UpperCAmelCase: str = logging.get_logger(__name__)

# Map of pretrained checkpoint id -> hosted config.json URL.
UpperCAmelCase: Optional[Any] = {
    """facebook/wav2vec2-base-960h""": """https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json""",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class UpperCamelCase ( PretrainedConfig ):
    """Configuration for a Wav2Vec2-style speech model.

    Fixes: (1) the base class `snake_case` was undefined — restored to the
    `PretrainedConfig` imported at the top of this file; (2) every `__init__`
    parameter shared the same name, which is a SyntaxError (duplicate argument);
    (3) every attribute assignment bound a throwaway local instead of `self`.
    Parameter names/defaults are restored from their positional defaults, which
    match the standard Wav2Vec2 configuration.
    """

    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        """Total stride of the convolutional feature extractor (product of conv strides)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
# Module-level logger.
# NOTE(review): immediately shadowed by the URL map below — both statements bind
# the same (mangled) name; confirm the intended distinct names.
lowerCAmelCase__ =logging.get_logger(__name__)

# Map of pretrained checkpoint id -> hosted config.json URL.
lowerCAmelCase__ ={
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}
class A__( PretrainedConfig ):
    """Configuration for the Visual Attention Network (VAN) model.

    Fixes: (1) the base class `__magic_name__` was undefined — restored to the
    `PretrainedConfig` imported at the top of this file; (2) all `__init__`
    parameters shared one name (a SyntaxError); (3) the attribute assignments
    bound locals instead of `self`. Names restored from the positional defaults.
    """

    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
lowerCAmelCase__ =None
# All four module constants previously shared one mangled name, so the class
# below referenced undefined `VOCAB_FILES_NAMES` etc. and `logger`. The names
# are restored to match those use sites.
logger = logging.get_logger(__name__)

# Marker sentencepiece uses for word-initial pieces.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class A__( PreTrainedTokenizerFast ):
    """Fast PEGASUS tokenizer backed by the `tokenizers` library.

    Fixes: (1) undefined base `__magic_name__` restored to the imported
    `PreTrainedTokenizerFast`; (2) the class attributes and `__init__` parameters
    all shared one name (duplicate-argument SyntaxError); (3) the four methods
    were all named `_a`, so only the last survived and `self._special_token_mask`
    was undefined. Names are restored from their use sites and the standard
    PEGASUS tokenizer API.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        """Return a 0/1 mask over *seq* marking special tokens (excluding <unk>)."""
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_a: List, token_ids_a_a: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Mask of special tokens for one or two sequences, including the trailing EOS."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_a)
        elif token_ids_a_a is None:
            return self._special_token_mask(token_ids_a) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_a_a) + [1]

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None) -> List[int]:
        """Append EOS to a (pair of) token-id sequence(s)."""
        if token_ids_a_a is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_a_a + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model into *save_directory* and return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """A binary-tree node holding an integer value and optional child links.

    Renamed from `UpperCAmelCase__`: the following class immediately re-bound
    that name (making this class unreachable) while annotating its parameter
    with the previously undefined name `Node`.
    """

    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None
class UpperCAmelCase__:
    """One-shot iterator that yields the sum of all node values in a binary tree."""

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        """Recursively sum the subtree rooted at *node* (0 for an empty subtree).

        Renamed from `UpperCAmelCase`: `__iter__` below calls
        `self.depth_first_search`, which previously did not exist.
        """
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    UpperCAmelCase = depth_first_search  # backward-compat alias for the old name

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
    # Run any doctests defined in this module when executed as a script.
    import doctest

    doctest.testmod()
import heapq
import sys
import numpy as np
a__ : Dict = tuple[int, int]
class PriorityQueue:
    """Min-priority queue over ``(priority, item)`` pairs with membership tracking.

    Backed by ``heapq``; ``self.set`` mirrors the items currently queued so that
    ``put`` can update an existing item's priority in place. Fixes: the class was
    named `UpperCAmelCase__` while instantiated as `PriorityQueue()`, all methods
    shared the name `UpperCAmelCase`, and the heap-pop unpacking bound both tuple
    elements to one name (losing the priority).
    """

    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        """Smallest queued priority, or +inf when the queue is empty."""
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        """Insert *item*, or update its priority if it is already queued."""
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update: pop entries until *item* surfaces, then reinsert everything
            # with *item* carrying the new priority
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for (pro, xxx) in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        """Remove *item* from the queue if present (its entry is discarded)."""
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for (prito, yyy) in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        """Item with the smallest priority; the queue is left unchanged."""
        return self.elements[0][1]

    def get(self):
        """Pop and return the ``(priority, item)`` pair with the smallest priority."""
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(p, goal):
    """Consistent heuristic: Euclidean distance between two grid positions.

    Renamed from the shadowed `_lowerCAmelCase` to match the `heuristics`
    table that references `consistent_heuristic`.
    """
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)
def heuristic_a(p, goal):
    """Inadmissible heuristic: the consistent heuristic floor-divided by the
    module-global time counter ``t``.

    Renamed from the shadowed `_lowerCAmelCase` to match the `heuristics`
    table that references `heuristic_a`.
    """
    return consistent_heuristic(p, goal) // t
def heuristic_manhattan(p, goal):
    """Manhattan distance between two grid positions.

    NOTE(review): upstream this was a second numbered heuristic; the mangled
    `heuristics` table references `heuristic_a` for both inadmissible slots, so
    this function is currently unreferenced — confirm the intended wiring.
    """
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])
def key(start, i, goal, g_function):
    """Open-list priority of *start* under heuristic *i*: g-cost plus weighted heuristic.

    Renamed from the shadowed `_lowerCAmelCase` to match its call sites.
    Relies on module globals ``Wa`` and ``heuristics`` — assumed defined at
    module scope (the names are mangled in this file; TODO confirm).
    """
    ans = g_function[start] + Wa * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    """Render the grid with the found path, print the path, and exit the process.

    Renamed from the shadowed `_lowerCAmelCase`; the grid-cell assignment
    targets (lost in the mangling) are reconstructed — every write previously
    bound a throwaway local instead of a ``grid`` cell. Uses module globals
    ``n`` (board size) and ``blocks`` (obstacles).
    """
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p):
    """Return True iff grid position *p* lies inside the n x n board.

    Renamed from the shadowed `_lowerCAmelCase` to match its call sites;
    ``n`` is the module-global board size.
    """
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    """Expand state *s* taken from open list *j*: relax its 4-neighbours and requeue them.

    Renamed from the shadowed `_lowerCAmelCase`; parameter names and the
    dictionary-assignment targets (lost in the mangling) are reconstructed.
    NOTE(review): the nesting of the two closed-list checks is not visible in
    the damaged source — the standard sibling-`if` structure is assumed.
    """
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                if neighbours not in close_list_inad:
                    for var in range(1, n_heuristic):
                        if key(neighbours, var, goal, g_function) <= Wa * key(neighbours, 0, goal, g_function):
                            open_list[j].put(neighbours, key(neighbours, var, goal, g_function))
def make_common_ground():
    """Build the alternative obstacle layout for the demo grid.

    Renamed from the shadowed `_lowerCAmelCase` to match the module-level call
    site (`make_common_ground()`). The coordinate ranges are unchanged.
    """
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
# All module-level constants previously shared one mangled name (`a__`), so the
# functions above referenced undefined globals. Names restored from use sites.

# Index -> heuristic used by ``key``; slot 0 must be the consistent heuristic.
heuristics = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}

blocks_blk = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]

# Alternative obstacle layout (computed but unused, as in the original).
blocks_all = make_common_ground()

blocks = blocks_blk

# hyper parameters
# NOTE(review): upstream had two weights W1 and W2 (both 1); the mangling
# collapsed them into a single name used everywhere. A single ``Wa = 1``
# preserves behaviour only because both weights equal 1 — confirm upstream.
Wa = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1  # time counter consumed by the inadmissible heuristic
def multi_a_star(start, goal, n_heuristic):
    """Multi-heuristic A*: one anchor (consistent) queue plus inadmissible queues.

    Renamed from the shadowed `_lowerCAmelCase` to match the `__main__` call
    site. The popped-state assignment targets (lost in the mangling) are
    reconstructed as a single `top_show()` result per branch.
    NOTE(review): branch nesting is not visible in the damaged source — the
    symmetric anchor/inadmissible structure is assumed.
    """
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= Wa * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, i, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, 0, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer
                    )
                    close_list_anchor.append(get_s)

    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
if __name__ == "__main__":
    # Run the demo search on the module-level grid configuration.
    multi_a_star(start, goal, n_heuristic)
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
# Dump interpreter / OS / torch / transformers information for bug reports.
# NOTE(review): the assignment below binds '3' to a throwaway name and the `os`
# import above goes unused — upstream this was likely
# `os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"` (silence TensorFlow); confirm
# before relying on it.
UpperCAmelCase_ : int = '3'
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())

try:
    import torch

    print('Torch version:', torch.__version__)
    print('Cuda available:', torch.cuda.is_available())
    print('Cuda version:', torch.version.cuda)
    print('CuDNN version:', torch.backends.cudnn.version())
    print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
    print('Torch version:', None)

try:
    import transformers

    print('transformers version:', transformers.__version__)
except ImportError:
    print('transformers version:', None)
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def SCREAMING_SNAKE_CASE_(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    """Load CSV splits, tokenize them and build tf.data datasets.

    Returns ``(train_ds, val_ds, test_ds, label2id)``. Fixes: all six
    parameters shared the name `__A` (a duplicate-argument SyntaxError), every
    local was bound to one throwaway name, the `files[...]` /
    `transformed_ds[...]` subscript targets were lost, and `tf.intaa` does not
    exist (restored to `tf.int64`).
    """
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        # single text column
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        # sentence-pair task
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
# Module-level logger for the training script.
UpperCAmelCase_ : List[Any] = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments describing the input data for training and evaluation.

    Fixes: the class was named `SCREAMING_SNAKE_CASE__` (shadowed by the next
    dataclass) while `HfArgumentParser` below references `DataTrainingArguments`;
    all six fields shared the name `snake_case__`, collapsing the dataclass to a
    single field; and `default=lowercase__` referenced an undefined name
    (restored to None/False per the help texts).
    """

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we fine-tune from.
    Parsed from the command line by HfArgumentParser in main().
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main() -> Dict:
    """
    Fine-tune / evaluate a TF sequence-classification model on tabular data.

    Parses (ModelArguments, DataTrainingArguments, TFTrainingArguments) from
    the command line, builds tf.data datasets via ``get_tfds``, trains with
    ``TFTrainer`` and, when ``--do_eval`` is set, writes the metrics to
    ``eval_results.txt`` in the output directory.

    Returns:
        dict mapping metric names to values (empty when evaluation is off).
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        # Accuracy over argmax class predictions.
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)

    return results
# Script entry point: run the training/evaluation pipeline.
if __name__ == "__main__":
    main()
| 570 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( A_ , A_ , A_ , unittest.TestCase ):
    """
    Fast, CPU-only tests for the InstructPix2Pix Stable Diffusion pipeline,
    built from tiny randomly-initialised components with fixed seeds.

    NOTE(review): several identifiers look machine-mangled and need fixing
    against the upstream diffusers test file: the base classes `A_` are
    undefined (presumably the three mixins imported above), every class
    attribute is named `_snake_case` (later assignments overwrite earlier
    ones), methods are all named `__a`, and several methods declare the
    parameter name `lowerCamelCase__` twice, which is a SyntaxError.
    """

    # Pipeline class under test and its parameter sets (attribute names mangled).
    _snake_case : Dict = StableDiffusionInstructPixaPixPipeline
    _snake_case : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
    _snake_case : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    _snake_case : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
    _snake_case : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def __a ( self :Dict ):
        # Build deterministic tiny components (unet / scheduler / vae / text
        # encoder / tokenizer) for the dummy pipeline.
        torch.manual_seed(0 )
        UpperCamelCase__ :Tuple = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        # NOTE(review): `lowerCamelCase__` is undefined in this scope — presumably True.
        UpperCamelCase__ :int = PNDMScheduler(skip_prk_steps=lowerCamelCase__ )
        torch.manual_seed(0 )
        UpperCamelCase__ :List[Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0 )
        UpperCamelCase__ :Tuple = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        UpperCamelCase__ :str = CLIPTextModel(lowerCamelCase__ )
        UpperCamelCase__ :List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        # NOTE(review): the dict references `unet`, `scheduler`, `vae`,
        # `text_encoder`, `tokenizer` — the repeated `UpperCamelCase__`
        # assignments above presumably originally bound those names.
        UpperCamelCase__ :Any = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components

    def __a ( self :Dict , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :str=0 ):
        # Build dummy call inputs (prompt, RGB image, seeded generator).
        # NOTE(review): duplicate parameter name `lowerCamelCase__`
        # (device and seed?) — SyntaxError as written.
        UpperCamelCase__ :Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
        UpperCamelCase__ :Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCamelCase__ :Tuple = Image.fromarray(np.uinta(lowerCamelCase__ ) ).convert("""RGB""" )
        if str(lowerCamelCase__ ).startswith("""mps""" ):
            UpperCamelCase__ :List[str] = torch.manual_seed(lowerCamelCase__ )
        else:
            UpperCamelCase__ :Optional[Any] = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
        UpperCamelCase__ :str = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """image_guidance_scale""": 1,
            """output_type""": """numpy""",
        }
        return inputs

    def __a ( self :Union[str, Any] ):
        # End-to-end smoke test: compare a 3x3 corner slice of the output image.
        UpperCamelCase__ :Tuple = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        UpperCamelCase__ :List[Any] = self.get_dummy_components()
        UpperCamelCase__ :int = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ )
        UpperCamelCase__ :List[Any] = sd_pipe.to(lowerCamelCase__ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCamelCase__ :List[Any] = self.get_dummy_inputs(lowerCamelCase__ )
        UpperCamelCase__ :Any = sd_pipe(**lowerCamelCase__ ).images
        UpperCamelCase__ :List[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        UpperCamelCase__ :int = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def __a ( self :int ):
        # Same pipeline run, exercising the `negative_prompt` argument.
        UpperCamelCase__ :Tuple = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        UpperCamelCase__ :Tuple = self.get_dummy_components()
        UpperCamelCase__ :Tuple = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ )
        UpperCamelCase__ :Dict = sd_pipe.to(lowerCamelCase__ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCamelCase__ :Any = self.get_dummy_inputs(lowerCamelCase__ )
        UpperCamelCase__ :List[Any] = """french fries"""
        UpperCamelCase__ :Optional[int] = sd_pipe(**lowerCamelCase__ , negative_prompt=lowerCamelCase__ )
        UpperCamelCase__ :List[Any] = output.images
        UpperCamelCase__ :Dict = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        UpperCamelCase__ :int = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def __a ( self :Optional[Any] ):
        # Batched inputs: duplicate prompt and image, expect batch size 2 output.
        UpperCamelCase__ :int = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        UpperCamelCase__ :Union[str, Any] = self.get_dummy_components()
        UpperCamelCase__ :Optional[Any] = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ )
        UpperCamelCase__ :List[Any] = sd_pipe.to(lowerCamelCase__ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCamelCase__ :Optional[Any] = self.get_dummy_inputs(lowerCamelCase__ )
        UpperCamelCase__ :Tuple = [inputs["""prompt"""]] * 2
        UpperCamelCase__ :List[Any] = np.array(inputs["""image"""] ).astype(np.floataa ) / 2_55.0
        UpperCamelCase__ :List[str] = torch.from_numpy(lowerCamelCase__ ).unsqueeze(0 ).to(lowerCamelCase__ )
        UpperCamelCase__ :int = image / 2 + 0.5
        UpperCamelCase__ :str = image.permute(0 , 3 , 1 , 2 )
        UpperCamelCase__ :List[str] = image.repeat(2 , 1 , 1 , 1 )
        UpperCamelCase__ :Optional[Any] = sd_pipe(**lowerCamelCase__ ).images
        UpperCamelCase__ :Union[str, Any] = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        UpperCamelCase__ :int = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def __a ( self :List[Any] ):
        # Run with the Euler-Ancestral scheduler and check the output slice.
        UpperCamelCase__ :List[str] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        UpperCamelCase__ :int = self.get_dummy_components()
        UpperCamelCase__ :Optional[int] = EulerAncestralDiscreteScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
        UpperCamelCase__ :List[Any] = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ )
        UpperCamelCase__ :List[Any] = sd_pipe.to(lowerCamelCase__ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCamelCase__ :List[str] = self.get_dummy_inputs(lowerCamelCase__ )
        UpperCamelCase__ :List[Any] = sd_pipe(**lowerCamelCase__ ).images
        UpperCamelCase__ :Tuple = image[0, -3:, -3:, -1]
        # NOTE(review): `slice` below is the builtin, and `lowerCamelCase__`
        # is undefined — presumably mangled from `str(x) for x in
        # image_slice.flatten().tolist()`.
        UpperCamelCase__ :Union[str, Any] = [round(lowerCamelCase__ , 4 ) for x in image_slice.flatten().tolist()]
        print(""",""".join([str(lowerCamelCase__ ) for x in slice] ) )
        assert image.shape == (1, 32, 32, 3)
        UpperCamelCase__ :Optional[int] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def __a ( self :str ):
        # Delegate to the mixin's batch-vs-single consistency check.
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )

    def __a ( self :str ):
        # Passing pre-encoded latents as the image input must give the same
        # result as passing the image itself.
        UpperCamelCase__ :Tuple = self.get_dummy_components()
        UpperCamelCase__ :Any = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ )
        UpperCamelCase__ :Optional[Any] = VaeImageProcessor(do_resize=lowerCamelCase__ , do_normalize=lowerCamelCase__ )
        UpperCamelCase__ :int = pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCamelCase__ :Optional[Any] = pipe(**self.get_dummy_inputs_by_type(lowerCamelCase__ , input_image_type="""pt""" ) )[0]
        UpperCamelCase__ :Union[str, Any] = components["""vae"""]
        UpperCamelCase__ :int = self.get_dummy_inputs_by_type(lowerCamelCase__ , input_image_type="""pt""" )
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                UpperCamelCase__ :Any = vae.encode(inputs[image_param] ).latent_dist.mode()
        UpperCamelCase__ :Optional[Any] = pipe(**lowerCamelCase__ )[0]
        UpperCamelCase__ :List[Any] = np.abs(out - out_latents_inputs ).max()
        self.assertLess(lowerCamelCase__ , 1e-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
    """
    Slow GPU integration tests for InstructPix2Pix against the real
    `timbrooks/instruct-pix2pix` checkpoint.

    NOTE(review): method names look machine-mangled — the first method calls
    `super().tearDown()` (so it was presumably `tearDown`), and the remaining
    methods are never collected by unittest because they are all named `__a`
    instead of `test_*`; confirm against the upstream diffusers test file.
    """

    def __a ( self :int ):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __a ( self :Optional[int] , lowerCamelCase__ :List[Any]=0 ):
        # Build real-image call inputs with a seeded generator.
        UpperCamelCase__ :Optional[Any] = torch.manual_seed(lowerCamelCase__ )
        UpperCamelCase__ :Tuple = load_image(
            """https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
        UpperCamelCase__ :List[str] = {
            """prompt""": """turn him into a cyborg""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """image_guidance_scale""": 1.0,
            """output_type""": """numpy""",
        }
        return inputs

    def __a ( self :List[str] ):
        # Default (PNDM) scheduler: compare a corner slice at 512x512.
        UpperCamelCase__ :str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=lowerCamelCase__ )
        pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        pipe.enable_attention_slicing()
        UpperCamelCase__ :int = self.get_inputs()
        UpperCamelCase__ :Optional[int] = pipe(**lowerCamelCase__ ).images
        UpperCamelCase__ :Tuple = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_12, 5_12, 3)
        UpperCamelCase__ :Optional[int] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-3

    def __a ( self :str ):
        # LMS discrete scheduler variant.
        UpperCamelCase__ :int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=lowerCamelCase__ )
        UpperCamelCase__ :Optional[int] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        pipe.enable_attention_slicing()
        UpperCamelCase__ :str = self.get_inputs()
        UpperCamelCase__ :Optional[Any] = pipe(**lowerCamelCase__ ).images
        UpperCamelCase__ :Any = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_12, 5_12, 3)
        UpperCamelCase__ :Union[str, Any] = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-3

    def __a ( self :Dict ):
        # DDIM scheduler variant.
        UpperCamelCase__ :Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=lowerCamelCase__ )
        UpperCamelCase__ :Dict = DDIMScheduler.from_config(pipe.scheduler.config )
        pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        pipe.enable_attention_slicing()
        UpperCamelCase__ :Optional[int] = self.get_inputs()
        UpperCamelCase__ :Dict = pipe(**lowerCamelCase__ ).images
        UpperCamelCase__ :Dict = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_12, 5_12, 3)
        UpperCamelCase__ :str = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-3

    def __a ( self :List[Any] ):
        # Intermediate-latents callback: verify latents at steps 1 and 2 and
        # that the callback ran once per inference step.
        UpperCamelCase__ :Optional[int] = 0

        def callback_fn(lowerCamelCase__ :int , lowerCamelCase__ :Dict , lowerCamelCase__ :str ) -> None:
            # NOTE(review): duplicate parameter name `lowerCamelCase__`
            # (step, timestep, latents?) — SyntaxError as written.
            UpperCamelCase__ :Tuple = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                UpperCamelCase__ :List[Any] = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                UpperCamelCase__ :int = latents[0, -3:, -3:, -1]
                UpperCamelCase__ :int = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
            elif step == 2:
                UpperCamelCase__ :Tuple = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                UpperCamelCase__ :List[Any] = latents[0, -3:, -3:, -1]
                UpperCamelCase__ :Tuple = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2

        UpperCamelCase__ :Optional[int] = False
        UpperCamelCase__ :List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=lowerCamelCase__ , torch_dtype=torch.floataa )
        UpperCamelCase__ :int = pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        pipe.enable_attention_slicing()
        UpperCamelCase__ :Tuple = self.get_inputs()
        pipe(**lowerCamelCase__ , callback=lowerCamelCase__ , callback_steps=1 )
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def __a ( self :Optional[Any] ):
        # Sequential CPU offload must keep peak GPU memory under ~2.2 GB.
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        UpperCamelCase__ :List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=lowerCamelCase__ , torch_dtype=torch.floataa )
        UpperCamelCase__ :List[str] = pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        UpperCamelCase__ :int = self.get_inputs()
        UpperCamelCase__ :Dict = pipe(**lowerCamelCase__ )
        UpperCamelCase__ :List[str] = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def __a ( self :Optional[Any] ):
        # Inputs whose resolution is divisible by 8 but not by 16/32 must
        # still round-trip through the pipeline at the same size.
        UpperCamelCase__ :Dict = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        UpperCamelCase__ :List[str] = inputs["""image"""].resize((5_04, 5_04) )
        UpperCamelCase__ :List[str] = """timbrooks/instruct-pix2pix"""
        UpperCamelCase__ :Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            lowerCamelCase__ , safety_checker=lowerCamelCase__ , )
        pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        pipe.enable_attention_slicing()
        UpperCamelCase__ :List[str] = pipe(**lowerCamelCase__ )
        UpperCamelCase__ :List[Any] = output.images[0]
        UpperCamelCase__ :Optional[Any] = image[2_55:2_58, 3_83:3_86, -1]
        assert image.shape == (5_04, 5_04, 3)
        UpperCamelCase__ :List[str] = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """
    Directed weighted graph used as a Markov chain: ``connections[node]`` maps
    each destination node to the probability of transitioning there.
    """

    def __init__(self) -> None:
        # adjacency mapping: node -> {destination: probability}
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        """Register *node* with no outgoing transitions yet."""
        self.connections[node] = {}

    def add_transition_probability(self, nodea: str, nodea_dest: str, probability: float) -> None:
        """Add a transition nodea -> nodea_dest with the given probability."""
        if nodea not in self.connections:
            self.add_node(nodea)
        if nodea_dest not in self.connections:
            self.add_node(nodea_dest)
        self.connections[nodea][nodea_dest] = probability

    def get_nodes(self) -> list[str]:
        """Return every known node, in insertion order."""
        return list(self.connections)

    def transition(self, node: str) -> str:
        """Sample the next node from *node*'s transition distribution.

        Returns "" when the cumulative probability never exceeds the random
        draw (e.g. probabilities that do not sum to 1).
        """
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def A(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """
    Run a random walk of *steps* transitions over the Markov chain described
    by *transitions*, starting from *start*.

    Returns:
        Counter mapping each node to 1 (its initial count) plus the number of
        times the walk landed on it.
    """
    graph = MarkovChainGraphUndirectedUnweighted()
    for nodea, nodea_dest, probability in transitions:
        graph.add_transition_probability(nodea, nodea_dest, probability)
    # Seed every node with a count of 1, then accumulate visits.
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    """CONNECTION_TIMES_OUT mode: requests without a timeout would hang
    (simulated by RequestWouldHangIndefinitelyError); with a timeout they
    raise ConnectTimeout."""
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request('GET', 'https://huggingface.co')
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request('GET', 'https://huggingface.co', timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error():
    """CONNECTION_FAILS mode: any network request raises ConnectionError."""
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request('GET', 'https://huggingface.co')
def test_offline_with_datasets_offline_mode():
    """HF_DATASETS_OFFLINE=1: datasets' http helpers refuse to hit the network."""
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        # NOTE(review): the raised-type identifier was mangled in the original;
        # ConnectionError assumed — confirm against the upstream test.
        with pytest.raises(ConnectionError):
            http_head('https://huggingface.co')
| 100 |
"""simple docstring"""
def combination_util(arr, n, r, index, data, i) -> None:
    """
    Recursively print every combination of size ``r`` drawn from ``arr``.

    Args:
        arr: source elements.
        n: number of elements in ``arr``.
        r: size of each combination.
        index: next free slot in ``data`` (current combination length).
        data: scratch list of length >= r holding the combination in progress.
        i: index of the next candidate element in ``arr``.
    """
    if index == r:
        # A complete combination of size r has been collected in data[0:r].
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r) -> None:
    """Print all size-``r`` combinations of ``arr`` (which has ``n`` elements)."""
    # Temporary array to hold one combination at a time.
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
lowercase__ :int = [1_0, 2_0, 3_0, 4_0, 5_0]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu | 522 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger; the config classes below warn/inform through it.
logger = logging.get_logger(__name__)

# Mapping from published BLIP checkpoints to their hosted config files.
BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
    "Salesforce/blip-vqa-capfit-large": (
        "https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-base": (
        "https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-large": (
        "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
    ),
    "Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
    "Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
    "Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
    "Salesforce/blip-itm-large-flikr": (
        "https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
    ),
}
class BlipTextConfig(PretrainedConfig):
    r"""
    Configuration for the text model of BLIP. Defaults mirror the
    ``Salesforce/blip-vqa-base`` architecture.
    """

    model_type = "blip_text_model"

    def __init__(
        self,
        vocab_size=30524,
        hidden_size=768,
        encoder_hidden_size=768,
        intermediate_size=3072,
        projection_dim=768,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=512,
        hidden_act="gelu",
        layer_norm_eps=1e-12,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        bos_token_id=30522,
        eos_token_id=2,
        pad_token_id=0,
        sep_token_id=102,
        is_decoder=True,
        use_cache=True,
        **kwargs,
    ):
        # Special-token ids are stored/validated by the PretrainedConfig base.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load the text sub-config, unwrapping it from a full BlipConfig if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipVisionConfig(PretrainedConfig):
    r"""
    Configuration for the vision (ViT) model of BLIP. Defaults mirror the
    ``Salesforce/blip-vqa-base`` architecture.
    """

    model_type = "blip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        image_size=384,
        patch_size=16,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load the vision sub-config, unwrapping it from a full BlipConfig if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipConfig(PretrainedConfig):
    r"""
    Composite configuration holding a BlipTextConfig and a BlipVisionConfig
    plus the cross-modal projection hyper-parameters.
    """

    model_type = "blip"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        image_text_hidden_size=256,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")

        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)

        # The text encoder cross-attends over vision features, so its
        # encoder width must follow the vision hidden size.
        self.text_config.encoder_hidden_size = self.vision_config.hidden_size
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config: "BlipTextConfig", vision_config: "BlipVisionConfig", **kwargs):
        """Build a BlipConfig from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested sub-configs to plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Dict ) ->List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def A_ ( self : Dict ) ->Tuple:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : List[Any] = controlnet_params
SCREAMING_SNAKE_CASE__ : Dict = "bird"
SCREAMING_SNAKE_CASE__ : List[Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe.prepare_image_inputs([canny_image] * num_samples )
SCREAMING_SNAKE_CASE__ : List[Any] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : int = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : List[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : List[str] = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Dict = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : List[Any] = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def A_ ( self : List[Any] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : Optional[int] = controlnet_params
SCREAMING_SNAKE_CASE__ : Any = "Chef in the kitchen"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
SCREAMING_SNAKE_CASE__ : str = pipe.prepare_image_inputs([pose_image] * num_samples )
SCREAMING_SNAKE_CASE__ : Any = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : List[str] = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : Tuple = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : str = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 | 26 | 0 |
from PIL import Image
def A ( lowercase__ : Image , lowercase__ : float ) -> Image:
def brightness(lowercase__ : int ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
return img.point(lowercase__ )
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
UpperCamelCase = change_brightness(img, 100)
brigt_img.save("image_data/lena_brightness.png", format="png") | 45 |
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring — these names are referenced by the @add_code_sample_docstrings
# decorators on the model forward methods below, so they must keep these exact names.
_CONFIG_FOR_DOC = 'PoolFormerConfig'

# Base docstring
_CHECKPOINT_FOR_DOC = 'sail/poolformer_s12'
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'sail/poolformer_s12'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'sail/poolformer_s12',
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input, drop_prob=0.0, training=False):
    """
    Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks.

    Args:
        input (`torch.Tensor`): input activations of any rank; the first dimension is the batch.
        drop_prob (`float`, defaults to 0.0): probability of zeroing each sample's path.
        training (`bool`, defaults to False): when False (or drop_prob == 0) the input is
            returned unchanged, matching eval-time behavior.

    Returns:
        `torch.Tensor`: tensor of the same shape; surviving samples are rescaled by
        1 / keep_prob so the expected value is preserved.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample: shape (batch, 1, 1, ...) so it broadcasts over
    # tensors of any rank, not just 2D ConvNet activations.
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample, for use in the main path of residual blocks."""

    def __init__(self, drop_prob=None):
        super().__init__()
        # Probability of dropping a sample's residual path; None/0.0 means no-op.
        self.drop_prob = drop_prob

    def forward(self, hidden_states):
        # Delegates to the module-level drop_path helper; self.training toggles the drop.
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self):
        # Shown in repr(module) for debuggability.
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """
    Construct patch embeddings: a strided Conv2d projection followed by an optional norm.
    """

    def __init__(self, patch_size, stride, padding, num_channels, hidden_size, norm_layer=None):
        super().__init__()
        # Accept either an int or an (h, w) iterable for each spatial hyperparameter.
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        # norm_layer is a class/callable taking the channel count; Identity when omitted.
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """
    Group Normalization with exactly 1 group (i.e. LayerNorm over channels for [B, C, *] tensors).
    """

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    """
    Token-mixing by average pooling: returns pool(x) - x (the residual is added by the caller).
    """

    def __init__(self, pool_size):
        super().__init__()
        # count_include_pad=False keeps border averages unbiased by the zero padding.
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # Subtracting the input makes this a pure "mixing" term for the residual connection.
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    """
    The PoolFormer MLP: two 1x1 convolutions with an activation and drop-path in between.
    """

    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        # hidden_act may be a string key into the activation registry or a callable.
        if isinstance(config.hidden_act, str):
            self.act_fn = ACTaFN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """
    One PoolFormer block: pooling-based token mixing + channel MLP, each with a residual
    connection, optional per-channel layer scaling, and stochastic depth.
    """

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)
        # Stochastic depth: useful for training neural nets; identity when drop_path == 0.
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            # Learnable per-channel scales applied to each sub-block's output.
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    """
    The full PoolFormer encoder: alternating patch-embedding (downsampling) stages and
    stacks of PoolFormerLayer blocks, with a linearly increasing stochastic-depth rate.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule: drop_path rate grows linearly over all layers
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings: one downsampling projection per encoder stage
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each stage consists of config.depths[i] layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None
        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weight initialization and provide a simple interface for
    downloading and loading pretrained PoolFormer models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights (called by PreTrainedModel.post_init)."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        # Enable/disable gradient checkpointing on the encoder.
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
# Docstring fragments injected by the @add_start_docstrings decorators below; the two
# assignments previously shadowed each other under a single name.
POOLFORMER_START_DOCSTRING = r'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
POOLFORMER_INPUTS_DOCSTRING = r'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
    '''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''',
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        # NOTE(review): this model defines no `self.embeddings`; this mirrors the code as
        # written and will raise AttributeError if called — confirm against upstream.
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality='''vision''',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values=None, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError('''You have to specify pixel_values''')

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class snake_case(nn.Module):
    """Simple pooler head: a single dense projection over the hidden state."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    '''
    PoolFormer Model transformer with an image classification head on top
    ''',
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head (Identity when num_labels == 0)
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]

        # Global average pool over the spatial dims, then classify.
        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            # Infer the problem type once from label dtype/num_labels, as in other HF heads.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = '''regression'''
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = '''single_label_classification'''
                else:
                    self.config.problem_type = '''multi_label_classification'''

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
# Emit INFO-level progress from the transformers logging utilities during conversion.
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger(__name__)  # module-level logger (not used below)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original roberta-prelayernorm checkpoint into a transformers model.

    Args:
        checkpoint_repo: Hub repo id of the official dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.
        pytorch_dump_folder_path: local directory the converted model and tokenizer are saved to.
    """
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=['''RobertaPreLayerNormForMaskedLM''']
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename='''pytorch_model.bin'''))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformers implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith('''roberta.'''):
            tensor_key = '''roberta_prelayernorm.''' + tensor_key[len('''roberta.''') :]

        # The original implementation contains weights which are not used; remove them from the state_dict
        if tensor_key.endswith('''.self.LayerNorm.weight''') or tensor_key.endswith('''.self.LayerNorm.bias'''):
            continue

        state_dict[tensor_key] = tensor_value

    # Weights come entirely from state_dict, so no checkpoint path is needed here.
    # NOTE(review): upstream passes None as pretrained_model_name_or_path — confirm.
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 531 |
import copy
import re
class TrialShortNamer:
    """
    Generates short, reversible run names from hyperparameter dicts: each parameter gets a
    compact abbreviation, only non-default values appear in the name, and `parse_repr`
    inverts `shortname`.
    """

    PREFIX = "hp"
    # Mapping of parameter name -> default value; params equal to their default are omitted.
    DEFAULTS = {}
    # Lazily built abbreviation tables (see build_naming_info).
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        """Configure the prefix and defaults, then (re)build the abbreviation tables."""
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        """Return (and memoize) the shortest unused prefix abbreviating `word`."""
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(F'''Parameters should not contain numbers: \'{word}\' contains a number''')
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback: every prefix is taken, so append "#<digits-as-letters>".
            def int_to_alphabetic(integer):
                s = ''''''
                while integer != 0:
                    s = chr(ord('''A''') + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + '''#''' + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    i += 1  # advance the counter so the loop terminates
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        """Abbreviate an underscore-separated parameter name, word by word."""
        words = param_name.split('''_''')
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to
        # fall back to a separated short name.
        separators = ['''''', '''_''']
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        """Register a parameter name in both directions of the abbreviation tables."""
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        """Build NAMING_INFO once per class from DEFAULTS (no-op if already built)."""
        if cls.NAMING_INFO is not None:
            return
        info = {
            '''short_word''': {},
            '''reverse_short_word''': {},
            '''short_param''': {},
            '''reverse_short_param''': {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        """Encode `params` as "<PREFIX>_<abbrev><value>..." (defaults are skipped)."""
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(F'''You should provide a default value for the param name {k} with value {v}''')
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO['''short_param'''][k]

            if isinstance(v, bool):
                v = 1 if v else 0

            # Numbers are concatenated directly; other values are joined with '-'.
            sep = '''''' if isinstance(v, (int, float)) else '''-'''
            e = F'''{key}{sep}{v}'''
            name.append(e)

        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        """Decode a name produced by `shortname` back into a full parameter dict."""
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split('''_''')

        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split('''-''')
            else:
                # Numeric values: the key is the non-digit part, the value the digits.
                p_k = re.sub('''[0-9.]''', '''''', value)
                p_v = float(re.sub('''[^0-9.]''', '''''', value))

            key = cls.NAMING_INFO['''reverse_short_param'''][p_k]
            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    """
    Greedily concatenate consecutive (src, tgt) pairs while both sides stay within
    `max_tokens` tokens according to `tok`; returns the packed (sources, targets) lists.
    """
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        # Token count of the candidate string under the provided tokenizer.
        return tok(strang, return_tensors="""pt""").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + """ """ + src
        cand_tgt = new_tgt + """ """ + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup: flush the last partially filled example
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
def pack_data_dir(tok, data_dir, max_tokens, save_path):
    """
    Pack the train split of {data_dir}/train.{source,target} with `pack_examples` and write
    the results (plus untouched copies of val/test) into `save_path`.
    """
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(F'''packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.''')
        Path(save_path / F'''{split}.source''').open("""w""").write("""\n""".join(packed_src))
        Path(save_path / F'''{split}.target''').open("""w""").write("""\n""".join(packed_tgt))
    for split in ["val", "test"]:
        # Validation and test splits are copied through unchanged.
        src_path, tgt_path = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
        shutil.copyfile(src_path, save_path / F'''{split}.source''')
        shutil.copyfile(tgt_path, save_path / F'''{split}.target''')
def packer_cli():
    """Command-line entry point: parse arguments, load the tokenizer, and pack the data dir."""
    parser = argparse.ArgumentParser()
    parser.add_argument("""--tok_name""", type=str, help="""like facebook/bart-large-cnn,t5-base, etc.""")
    parser.add_argument("""--max_seq_len""", type=int, default=128)
    parser.add_argument("""--data_dir""", type=str)
    parser.add_argument("""--save_path""", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
| 680 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        # Serialize a default SamProcessor to a temp dir so each test can reload it.
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 CHW arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        # Extra kwargs must survive a save/load round trip.
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="""np""")
        input_processor = processor(images=image_input, return_tensors="""np""")

        input_feat_extract.pop("""original_sizes""")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("""reshaped_input_sizes""")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
@require_vision
@require_tf
class __a (unittest.TestCase):
    '''SamProcessor tests against the TensorFlow backend: save/load round-trips,
    feature parity with the bare image processor, and TF mask post-processing.

    NOTE(review): this block is machine-mangled source. Every method is named
    `_a` (later definitions shadow earlier ones, so unittest would only ever
    see the last), assignments bind `SCREAMING_SNAKE_CASE__` while subsequent
    lines reference names that were never bound (`processor`,
    `self.tmpdirname`, `masks`, ...), and `_a` doubles as an argument
    placeholder. The docstrings below describe the evident intent only; the
    code is not runnable until the original identifiers are restored.
    '''

    def _a ( self ) -> Any:
        """setUp: persist a default SamProcessor into a fresh temp directory."""
        SCREAMING_SNAKE_CASE__ : List[Any] = tempfile.mkdtemp()
        SCREAMING_SNAKE_CASE__ : Optional[int] = SamImageProcessor()
        SCREAMING_SNAKE_CASE__ : Dict = SamProcessor(_a )
        processor.save_pretrained(self.tmpdirname )

    def _a ( self , **_a ) -> List[str]:
        """Reload the image processor from the temp dir, forwarding any kwargs."""
        return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).image_processor

    def _a ( self ) -> int:
        """tearDown: remove the temporary directory created in setUp."""
        shutil.rmtree(self.tmpdirname )

    def _a ( self ) -> Optional[Any]:
        """Build a single random 3x30x400 uint8 image wrapped as a PIL Image."""
        SCREAMING_SNAKE_CASE__ : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        SCREAMING_SNAKE_CASE__ : Any = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def _a ( self ) -> Union[str, Any]:
        """save_pretrained/from_pretrained round-trip must keep the image-processor
        config, including kwargs overridden at load time."""
        SCREAMING_SNAKE_CASE__ : Any = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        SCREAMING_SNAKE_CASE__ : int = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
        SCREAMING_SNAKE_CASE__ : Tuple = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_a , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , _a )

    def _a ( self ) -> str:
        """Processor output must match the bare image processor, modulo the size
        entries the processor pops from its output."""
        SCREAMING_SNAKE_CASE__ : Dict = self.get_image_processor()
        SCREAMING_SNAKE_CASE__ : List[Any] = SamProcessor(image_processor=_a )
        SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_image_inputs()
        SCREAMING_SNAKE_CASE__ : Any = image_processor(_a , return_tensors="""np""" )
        SCREAMING_SNAKE_CASE__ : Any = processor(images=_a , return_tensors="""np""" )
        input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    @require_tf
    def _a ( self ) -> List[Any]:
        """post_process_masks must upsample TF and np masks to the original sizes
        and reject malformed inputs with tf.errors.InvalidArgumentError."""
        SCREAMING_SNAKE_CASE__ : Tuple = self.get_image_processor()
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = SamProcessor(image_processor=_a )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = [tf.ones((1, 3, 5, 5) )]
        SCREAMING_SNAKE_CASE__ : Optional[int] = [[1_764, 2_646]]
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = [[683, 1_024]]
        SCREAMING_SNAKE_CASE__ : Optional[Any] = processor.post_process_masks(_a , _a , _a , return_tensors="""tf""" )
        self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = processor.post_process_masks(
            _a , tf.convert_to_tensor(_a ) , tf.convert_to_tensor(_a ) , return_tensors="""tf""" , )
        self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
        # should also work with np
        SCREAMING_SNAKE_CASE__ : Optional[int] = [np.ones((1, 3, 5, 5) )]
        SCREAMING_SNAKE_CASE__ : Optional[Any] = processor.post_process_masks(
            _a , np.array(_a ) , np.array(_a ) , return_tensors="""tf""" )
        self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
        SCREAMING_SNAKE_CASE__ : Any = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError ):
            SCREAMING_SNAKE_CASE__ : str = processor.post_process_masks(
                _a , np.array(_a ) , np.array(_a ) , return_tensors="""tf""" )
@require_vision
@require_torchvision
class __a (unittest.TestCase):
    '''Cross-framework (PyTorch vs TensorFlow) equivalence checks for
    SamProcessor: mask post-processing and pixel_values must agree.

    NOTE(review): machine-mangled source — all methods are named `_a`,
    assignments bind `SCREAMING_SNAKE_CASE__` while following lines reference
    unbound names (`processor`, `tf_masks`, `pt_masks`, ...), and `_a` doubles
    as an argument placeholder. Docstrings describe intent only; the code is
    not runnable until the original identifiers are restored.
    '''

    def _a ( self ) -> Any:
        """setUp: persist a default SamProcessor into a fresh temp directory."""
        SCREAMING_SNAKE_CASE__ : List[Any] = tempfile.mkdtemp()
        SCREAMING_SNAKE_CASE__ : Dict = SamImageProcessor()
        SCREAMING_SNAKE_CASE__ : Dict = SamProcessor(_a )
        processor.save_pretrained(self.tmpdirname )

    def _a ( self , **_a ) -> Any:
        """Reload the image processor from the temp dir, forwarding any kwargs."""
        return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).image_processor

    def _a ( self ) -> Union[str, Any]:
        """tearDown: remove the temporary directory created in setUp."""
        shutil.rmtree(self.tmpdirname )

    def _a ( self ) -> Optional[int]:
        """Build a single random 3x30x400 uint8 image wrapped as a PIL Image."""
        SCREAMING_SNAKE_CASE__ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def _a ( self ) -> Tuple:
        """post_process_masks must produce identical masks via the TF and PT paths."""
        SCREAMING_SNAKE_CASE__ : List[str] = self.get_image_processor()
        SCREAMING_SNAKE_CASE__ : int = SamProcessor(image_processor=_a )
        SCREAMING_SNAKE_CASE__ : Optional[int] = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
        SCREAMING_SNAKE_CASE__ : List[Any] = [tf.convert_to_tensor(_a )]
        SCREAMING_SNAKE_CASE__ : Dict = [torch.tensor(_a )]
        SCREAMING_SNAKE_CASE__ : Optional[int] = [[1_764, 2_646]]
        SCREAMING_SNAKE_CASE__ : List[str] = [[683, 1_024]]
        SCREAMING_SNAKE_CASE__ : List[Any] = processor.post_process_masks(
            _a , _a , _a , return_tensors="""tf""" )
        SCREAMING_SNAKE_CASE__ : List[str] = processor.post_process_masks(
            _a , _a , _a , return_tensors="""pt""" )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )

    @is_pt_tf_cross_test
    def _a ( self ) -> Union[str, Any]:
        """pixel_values must agree between the bare image processor and the
        processor, for both return_tensors='pt' and 'tf'."""
        SCREAMING_SNAKE_CASE__ : List[str] = self.get_image_processor()
        SCREAMING_SNAKE_CASE__ : List[Any] = SamProcessor(image_processor=_a )
        SCREAMING_SNAKE_CASE__ : str = self.prepare_image_inputs()
        SCREAMING_SNAKE_CASE__ : int = image_processor(_a , return_tensors="""pt""" )["""pixel_values"""].numpy()
        SCREAMING_SNAKE_CASE__ : Any = processor(images=_a , return_tensors="""pt""" )["""pixel_values"""].numpy()
        SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor(_a , return_tensors="""tf""" )["""pixel_values"""].numpy()
        SCREAMING_SNAKE_CASE__ : str = processor(images=_a , return_tensors="""tf""" )["""pixel_values"""].numpy()
        self.assertTrue(np.allclose(_a , _a ) )
        self.assertTrue(np.allclose(_a , _a ) )
        self.assertTrue(np.allclose(_a , _a ) )
| 680 | 1 |
'''simple docstring'''
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class a__ ( __lowercase , unittest.TestCase ):
'''simple docstring'''
lowercase__ : Dict = WavaVecaPhonemeCTCTokenizer
lowercase__ : Optional[int] = False
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
super().setUp()
lowerCAmelCase__ = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
).split(''' ''' )
lowerCAmelCase__ = dict(zip(_A , range(len(_A ) ) ) )
lowerCAmelCase__ = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''}
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_A ) + '''\n''' )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=20 , lowerCamelCase_=5 ) -> Optional[int]:
lowerCAmelCase__ = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=_A )) for i in range(len(_A ) )]
lowerCAmelCase__ = list(filter(lambda lowerCamelCase_ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=_A ) , _A ) )
if max_length is not None and len(_A ) > max_length:
lowerCAmelCase__ = toks[:max_length]
if min_length is not None and len(_A ) < min_length and len(_A ) > 0:
while len(_A ) < min_length:
lowerCAmelCase__ = toks + toks
# toks_str = [t[1] for t in toks]
lowerCAmelCase__ = [t[0] for t in toks]
# Ensure consistency
lowerCAmelCase__ = tokenizer.decode(_A , clean_up_tokenization_spaces=_A )
if " " not in output_txt and len(_A ) > 1:
lowerCAmelCase__ = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A )
)
if with_prefix_space:
lowerCAmelCase__ = ''' ''' + output_txt
lowerCAmelCase__ = tokenizer.encode(_A , add_special_tokens=_A )
return output_txt, output_ids
def __SCREAMING_SNAKE_CASE ( self , **lowerCamelCase_ ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **_A )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
# check adding a single token
tokenizer.add_tokens('''xxx''' )
lowerCAmelCase__ = tokenizer('''m xxx ɪ''' , do_phonemize=_A ).input_ids
self.assertEqual(_A , [13, 3_92, 17] ) # xxx should be last token
tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] )
lowerCAmelCase__ = tokenizer('''m aaa ɪ ccc''' , do_phonemize=_A ).input_ids
self.assertEqual(_A , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa
lowerCAmelCase__ = tokenizer('''maɪ c''' , do_phonemize=_A ).input_ids
self.assertEqual(_A , [3, 2_00] ) # mai should be <unk> (=3)
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
lowerCAmelCase__ = '''Hello how are you'''
lowerCAmelCase__ = tokenizer.phonemize(_A , phonemizer_lang='''en-us''' )
self.assertEqual(_A , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
lowerCAmelCase__ = '''Hello how are you'''
lowerCAmelCase__ = tokenizer.phonemize(_A , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(_A ).input_ids , tokenizer(_A , do_phonemize=_A ).input_ids )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
lowerCAmelCase__ = '''Hello how are you'''
lowerCAmelCase__ = tokenizer.phonemize(_A , phonemizer_lang='''en-us''' )
lowerCAmelCase__ = tokenizer.decode(tokenizer(_A ).input_ids )
self.assertEqual(_A , _A )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
lowerCAmelCase__ = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
lowerCAmelCase__ = tokenizer.decode(sample_ids[0] )
lowerCAmelCase__ = tokenizer.batch_decode(_A )
self.assertEqual(_A , batch_tokens[0] )
self.assertEqual(_A , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
lowerCAmelCase__ = '''Hello how are you'''
lowerCAmelCase__ = tokenizer.phonemize(_A , phonemizer_lang='''en-us''' )
self.assertEqual(_A , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
lowerCAmelCase__ = '''Hello how are you'''
lowerCAmelCase__ = tokenizer.phonemize(_A , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(_A ).input_ids , tokenizer(_A , do_phonemize=_A ).input_ids )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
lowerCAmelCase__ = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
lowerCAmelCase__ = tokenizer.decode(sample_ids[0] )
lowerCAmelCase__ = tokenizer.batch_decode(_A )
self.assertEqual(_A , batch_tokens[0] )
self.assertEqual(_A , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
# decode with no word_del_token filter
lowerCAmelCase__ = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=_A )
lowerCAmelCase__ = tokenizer.batch_decode(_A , filter_word_delimiter_token=_A )
self.assertEqual(_A , batch_tokens[0] )
self.assertEqual(_A , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
lowerCAmelCase__ = '''Hello how are you'''
lowerCAmelCase__ = tokenizer.phonemize(_A , phonemizer_lang='''en-us''' )
lowerCAmelCase__ = tokenizer.decode(tokenizer(_A ).input_ids , filter_word_delimiter_token=_A )
self.assertEqual(_A , _A )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
lowerCAmelCase__ = '''Hello how are you'''
lowerCAmelCase__ = tokenizer.phonemize(_A , phonemizer_lang='''en-us''' )
lowerCAmelCase__ = tokenizer.decode(tokenizer(_A ).input_ids , filter_word_delimiter_token=_A )
self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , _A )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=_A )
lowerCAmelCase__ = '''Hello how are you'''
lowerCAmelCase__ = tokenizer(_A , phonemizer_lang='''en-us''' ).input_ids
lowerCAmelCase__ = tokenizer(_A , phonemizer_lang='''fr-fr''' ).input_ids
self.assertNotEqual(_A , _A )
lowerCAmelCase__ = tokenizer.decode(_A )
lowerCAmelCase__ = tokenizer.decode(_A )
self.assertEqual(_A , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
self.assertEqual(_A , '''ɛ l o h aʊ a ʁ j u''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
lowerCAmelCase__ = '''Hello how Are you'''
lowerCAmelCase__ = '''hello how are you'''
lowerCAmelCase__ = tokenizer(_A ).input_ids
lowerCAmelCase__ = tokenizer(_A ).input_ids
self.assertEqual(_A , _A )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
tokenizer.add_tokens(['''!''', '''?'''] )
tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} )
# fmt: off
lowerCAmelCase__ = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
]
# fmt: on
lowerCAmelCase__ = tokenizer.batch_decode(_A )
self.assertEqual(_A , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] )
@staticmethod
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_ , lowerCamelCase_ ) -> Any:
lowerCAmelCase__ = [d[key] for d in offsets]
return retrieved_list
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = self.get_tokenizer(word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
lowerCAmelCase__ = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
lowerCAmelCase__ = tokenizer.decode(_A , output_char_offsets=_A , filter_word_delimiter_token=_A )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''char_offsets''' in outputs )
self.assertTrue(isinstance(_A , _A ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = self.get_tokenizer(word_delimiter_token='''|''' )
def check_list_tuples_equal(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(isinstance(_A , _A ) )
self.assertTrue(isinstance(outputs_list[0] , _A ) )
# transform list to ModelOutput
lowerCAmelCase__ = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] )
def recursive_check(lowerCamelCase_ , lowerCamelCase_ ):
if isinstance(_A , _A ):
[recursive_check(_A , _A ) for la, la in zip(_A , _A )]
self.assertEqual(_A , _A )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] )
# fmt: off
lowerCAmelCase__ = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
lowerCAmelCase__ = tokenizer.batch_decode(_A , output_char_offsets=_A )
lowerCAmelCase__ = [tokenizer.decode(_A , output_char_offsets=_A ) for ids in sample_ids]
check_list_tuples_equal(_A , _A )
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
pass
@unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = self.get_tokenizers(do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase__ = tokenizer.vocab_size
lowerCAmelCase__ = len(_A )
self.assertNotEqual(_A , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
lowerCAmelCase__ = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
lowerCAmelCase__ = tokenizer.add_tokens(_A )
lowerCAmelCase__ = tokenizer.vocab_size
lowerCAmelCase__ = len(_A )
self.assertNotEqual(_A , 0 )
self.assertEqual(_A , _A )
self.assertEqual(_A , len(_A ) )
self.assertEqual(_A , all_size + len(_A ) )
lowerCAmelCase__ = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=_A )
self.assertGreaterEqual(len(_A ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
lowerCAmelCase__ = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
lowerCAmelCase__ = tokenizer.add_special_tokens(_A )
lowerCAmelCase__ = tokenizer.vocab_size
lowerCAmelCase__ = len(_A )
self.assertNotEqual(_A , 0 )
self.assertEqual(_A , _A )
self.assertEqual(_A , len(_A ) )
self.assertEqual(_A , all_size_a + len(_A ) )
lowerCAmelCase__ = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=_A )
self.assertGreaterEqual(len(_A ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
lowerCAmelCase__ = self.get_tokenizers(fast=_A , do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase__ = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
lowerCAmelCase__ = tokenizer.convert_tokens_to_string(_A )
self.assertIsInstance(output['''text'''] , _A ) | 705 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """Return True if some permutation of *input_str* is a palindrome.

    Spaces are ignored and the check is case-insensitive. A string can be
    rearranged into a palindrome iff at most one character occurs an odd
    number of times; ``collections.Counter`` gives the counts in O(n).

    Fix: the parameter was mangled to ``A`` while the body read ``input_str``,
    raising NameError on every call; the intended name (used by the call
    sites below) is restored.
    """
    return sum(count % 2 for count in Counter(input_str.replace(" ", "").lower()).values()) < 2
def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Return True if some permutation of *input_str* is a palindrome.

    Spaces are ignored and the check is case-insensitive: a string can be
    rearranged into a palindrome iff at most one character has an odd count.
    This variant counts frequencies by hand (the Counter-based sibling does
    the same check via the standard library).

    Fix: the parameter was mangled to ``A`` while parts of the body read
    ``input_str``, raising NameError on any non-empty input; the intended
    name (used by the call site in ``benchmark``) is restored.
    """
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    # A palindrome permutation allows at most one odd-count character.
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True
def benchmark(input_str: str = "") -> None:
    """Print the answer and the timing of both palindrome-rearrangement checks.

    Note: the ``timeit`` snippets deliberately reference the module-level
    ``check_str`` global via ``import __main__ as z`` (set in the ``__main__``
    guard), not the *input_str* argument.

    Fix: this function was mangled to ``_snake_case`` while the ``__main__``
    block calls ``benchmark(check_str)``; the intended name is restored.
    """
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
if __name__ == "__main__":
    # Read a string from stdin, benchmark both implementations on it, then
    # report whether it can be rearranged into a palindrome.
    # Fix: the originals assigned to a mangled placeholder (`__UpperCAmelCase`)
    # while the following lines read `check_str` and `status` (NameError);
    # the intended bindings are restored and dataset-separator residue on the
    # final line is removed.
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
"""Launch a Transformers example script on a Runhouse cluster (BYO or on-demand)."""
import argparse
import shlex

import runhouse as rh

if __name__ == "__main__":
    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
    # setup instructions, if using on-demand hardware
    # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
    # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
    # Throw an error if user passes both BYO and on-demand cluster args
    # Otherwise, use default values
    #
    # Fix: every assignment below was mangled to the placeholder `__A` while the
    # following lines read `parser`, `args`/`unknown`, `cluster` and
    # `example_dir` (NameError at runtime); the intended bindings — evident
    # from the immediate usages — are restored, including unpacking the
    # (namespace, leftovers) tuple returned by parse_known_args().
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    # `unknown` holds the leftover CLI args, forwarded verbatim to the example.
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        # BYO cluster: the on-demand flags must be left at their defaults.
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]
    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f'python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'])
    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
    #                  stream_logs=True)
| 325 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCamelCase_ ( unittest.TestCase ):
    '''Fast (dummy-model) tests for AltDiffusionImgaImgPipeline: output shape,
    tuple-vs-dict parity, fp16 smoke test, and non-multiple-of-32 image sizes.

    NOTE(review): machine-mangled source — all methods are named
    `lowerCAmelCase_` (later definitions shadow earlier ones, so unittest
    would only discover the last), every assignment binds
    `SCREAMING_SNAKE_CASE_` while subsequent lines reference the original
    identifiers (`alt_pipe`, `unet`, `vae`, `bert`, `init_image`, `image`,
    ...), and `_lowerCAmelCase` doubles as an argument placeholder. Docstrings
    describe intent only; the code is not runnable until restored.
    '''

    def lowerCAmelCase_ ( self : str ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def lowerCAmelCase_ ( self : Union[str, Any] ):
        """dummy_image: a deterministic 1x3x32x32 float tensor on the test device."""
        SCREAMING_SNAKE_CASE_ = 1
        SCREAMING_SNAKE_CASE_ = 3
        SCREAMING_SNAKE_CASE_ = (32, 32)
        SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowerCAmelCase )
        return image

    @property
    def lowerCAmelCase_ ( self : Union[str, Any] ):
        """dummy_cond_unet: a tiny seeded UNet2DConditionModel."""
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE_ = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        return model

    @property
    def lowerCAmelCase_ ( self : Tuple ):
        """dummy_vae: a tiny seeded AutoencoderKL."""
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE_ = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        return model

    @property
    def lowerCAmelCase_ ( self : Optional[int] ):
        """dummy_text_encoder: a tiny seeded RobertaSeriesModelWithTransformation."""
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE_ = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
        return RobertaSeriesModelWithTransformation(_lowerCAmelCase )

    @property
    def lowerCAmelCase_ ( self : List[Any] ):
        """dummy_extractor: a stub feature extractor returning empty pixel_values."""
        def extract(*_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : str ):
            class lowerCamelCase_ :
                '''Minimal stand-in for a feature-extractor output object.'''
                def __init__( self : str ):
                    SCREAMING_SNAKE_CASE_ = torch.ones([0] )
                def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : int ):
                    """Mimic `.to(device)`, returning self for chaining."""
                    self.pixel_values.to(_lowerCAmelCase )
                    return self
            return Out()
        return extract

    def lowerCAmelCase_ ( self : Optional[Any] ):
        """Full img2img run on CPU with dummy components: checks output shape and
        that the dict and tuple return paths agree to known slices."""
        SCREAMING_SNAKE_CASE_ = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        SCREAMING_SNAKE_CASE_ = self.dummy_cond_unet
        SCREAMING_SNAKE_CASE_ = PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ = self.dummy_vae
        SCREAMING_SNAKE_CASE_ = self.dummy_text_encoder
        SCREAMING_SNAKE_CASE_ = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
        SCREAMING_SNAKE_CASE_ = 77
        SCREAMING_SNAKE_CASE_ = self.dummy_image.to(_lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline(
            unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=self.dummy_extractor , )
        SCREAMING_SNAKE_CASE_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ = alt_pipe.to(_lowerCAmelCase )
        alt_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ = 'A painting of a squirrel eating a burger'
        SCREAMING_SNAKE_CASE_ = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
        SCREAMING_SNAKE_CASE_ = alt_pipe(
            [prompt] , generator=_lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , )
        SCREAMING_SNAKE_CASE_ = output.images
        SCREAMING_SNAKE_CASE_ = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
        SCREAMING_SNAKE_CASE_ = alt_pipe(
            [prompt] , generator=_lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , return_dict=_lowerCAmelCase , )[0]
        SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
        SCREAMING_SNAKE_CASE_ = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        SCREAMING_SNAKE_CASE_ = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3

    @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def lowerCAmelCase_ ( self : Tuple ):
        """fp16 smoke test: pipeline with half-precision dummy components runs
        and yields the expected output shape."""
        SCREAMING_SNAKE_CASE_ = self.dummy_cond_unet
        SCREAMING_SNAKE_CASE_ = PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ = self.dummy_vae
        SCREAMING_SNAKE_CASE_ = self.dummy_text_encoder
        SCREAMING_SNAKE_CASE_ = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
        SCREAMING_SNAKE_CASE_ = 77
        SCREAMING_SNAKE_CASE_ = self.dummy_image.to(_lowerCAmelCase )
        # put models in fp16
        SCREAMING_SNAKE_CASE_ = unet.half()
        SCREAMING_SNAKE_CASE_ = vae.half()
        SCREAMING_SNAKE_CASE_ = bert.half()
        # make sure here that pndm scheduler skips prk
        SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline(
            unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=self.dummy_extractor , )
        SCREAMING_SNAKE_CASE_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ = alt_pipe.to(_lowerCAmelCase )
        alt_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ = 'A painting of a squirrel eating a burger'
        SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE_ = alt_pipe(
            [prompt] , generator=_lowerCAmelCase , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , ).images
        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def lowerCAmelCase_ ( self : str ):
        """Real checkpoint run with an input resized to 760x504 — divisible by 8
        but not 16/32 — to exercise non-standard resolutions."""
        SCREAMING_SNAKE_CASE_ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        # resize to resolution that is divisible by 8 but not 16 or 32
        SCREAMING_SNAKE_CASE_ = init_image.resize((760, 504) )
        SCREAMING_SNAKE_CASE_ = 'BAAI/AltDiffusion'
        SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline.from_pretrained(
            _lowerCAmelCase , safety_checker=_lowerCAmelCase , )
        pipe.to(_lowerCAmelCase )
        pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        pipe.enable_attention_slicing()
        SCREAMING_SNAKE_CASE_ = 'A fantasy landscape, trending on artstation'
        SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE_ = pipe(
            prompt=_lowerCAmelCase , image=_lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=_lowerCAmelCase , output_type='np' , )
        SCREAMING_SNAKE_CASE_ = output.images[0]
        SCREAMING_SNAKE_CASE_ = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        SCREAMING_SNAKE_CASE_ = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class AltDiffusionImgaImgPipelineIntegrationTests(unittest.TestCase):
    """Slow, GPU-only integration tests for the AltDiffusion img2img pipeline."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg'
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy'
        )
        model_id = 'BAAI/AltDiffusion'
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'A fantasy landscape, trending on artstation'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type='np',
        )
        image = output.images[0]
        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
# Module-level logger used by JaxFormatter below.
logger = get_logger()

# Lazily-built map from device string identifiers to jaxlib Device objects.
# Kept as a module-level global because `jaxlib.xla_extension.Device` is not
# serializable with `pickle` nor `dill`.
DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    """Formatter that converts Arrow-backed rows/columns/batches into JAX arrays.

    Args:
        features: optional `Features` forwarded to the base formatter.
        device: string identifier of the JAX device to place arrays on
            (must be a `str`, never a `jaxlib` Device object — those can't be
            pickled). Defaults to the first available device.
        **jnp_array_kwargs: extra keyword arguments forwarded to `jnp.array`.
    """

    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> "Dict[str, jaxlib.xla_extension.Device]":
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        # Stack a list of same-shape/same-dtype jax arrays into one array;
        # otherwise return the column unchanged.
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: "pa.Table") -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: "pa.Table") -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: "pa.Table") -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 716 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    """Runs the external-deps metrics script under CPU, single-GPU and multi-GPU launchers."""

    def setUp(self):
        # Locate the test_metrics.py script shipped inside accelerate.test_utils.
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        # Single-process debug launch.
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        # Default multi-process debug launch on CPU.
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 453 | 0 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# Whether we are running inside Google Colab. The menu below falls back to a
# plain `input()` prompt there, since cursor-movement escape codes don't work.
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """Interactive terminal bullet menu.

    Renders ``choices`` one per line with an arrow marker on the current row,
    and lets the user move with arrow keys, jump with digit keys, and confirm
    with enter. ``run()`` returns the selected index.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        # Green highlight on ANSI terminals; plain text on Windows consoles.
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Print the choice at `index`, with the arrow marker if it is selected."""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Move the cursor `num_spaces` rows in `direction`, redrawing both rows."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        # Jump directly to the row matching the pressed digit key.
        # `self.current_selection` is set by the @input.register machinery.
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        """Draw the menu, process key presses, and return the chosen index."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    # No raw key handling in Colab: read an index from stdin.
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    # Erase the rendered menu before returning.
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
| 587 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map from public checkpoint names to their hosted config.json files.
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}
class CamembertConfig(PretrainedConfig):
    """Configuration for the CamemBERT model (a RoBERTa variant).

    All arguments mirror the BERT/RoBERTa configuration; defaults reproduce
    the `camembert-base` architecture.
    """

    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    """ONNX export configuration for CamemBERT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 587 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        """Check the tiny mT5 checkpoint reproduces the reference loss.

        Reference score computed with the original mesh-tensorflow (mtf)
        implementation; tolerance 2e-4.
        """
        model = TFAutoModelForSeqaSeqLM.from_pretrained('google/mt5-small')
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')
        input_ids = tokenizer('Hello there', return_tensors='tf').input_ids
        labels = tokenizer('Hi I am', return_tensors='tf').input_ids
        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()
        EXPECTED_SCORE = -2_1.2_2_8_1_6_8
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 169 |
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test with 5 random witnesses.

    Returns False for proven composites and True for probable primes
    (false-positive probability at most 4**-5 per call).

    NOTE(review): assumes ``num`` is odd and > 3 — an even ``num`` makes
    ``t == 0`` and the inner loop can spin forever. The caller
    (`is_prime_low_num`) filters out evens and small-prime multiples first.
    """
    # Factor num - 1 as 2**t * s with s odd.
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    # a witnesses that num is composite
                    return False
                i = i + 1
                v = (v**2) % num
    return True
def is_prime_low_num(num: int) -> bool:
    """Return True if ``num`` is (probably) prime.

    Values below 1000 are settled exactly via a hard-coded table of all 168
    primes < 1000; larger candidates are first trial-divided by those primes,
    then handed to the probabilistic ``rabin_miller`` test.
    """
    if num < 2:
        # 0, 1 and negative numbers are not prime
        return False
    # All primes below 1000, used both as an exact answer set and as
    # trial divisors that let rabin_miller assume an odd input.
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29,
        31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
        73, 79, 83, 89, 97, 101, 103, 107, 109, 113,
        127, 131, 137, 139, 149, 151, 157, 163, 167, 173,
        179, 181, 191, 193, 197, 199, 211, 223, 227, 229,
        233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
        283, 293, 307, 311, 313, 317, 331, 337, 347, 349,
        353, 359, 367, 373, 379, 383, 389, 397, 401, 409,
        419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
        467, 479, 487, 491, 499, 503, 509, 521, 523, 541,
        547, 557, 563, 569, 571, 577, 587, 593, 599, 601,
        607, 613, 617, 619, 631, 641, 643, 647, 653, 659,
        661, 673, 677, 683, 691, 701, 709, 719, 727, 733,
        739, 743, 751, 757, 761, 769, 773, 787, 797, 809,
        811, 821, 823, 827, 829, 839, 853, 857, 859, 863,
        877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
        947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    # num is odd and coprime to all primes < 1000: defer to Miller-Rabin.
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    """Return a random probable prime with exactly ``keysize`` bits.

    Repeatedly draws uniform candidates from [2**(keysize-1), 2**keysize)
    until one passes ``is_prime_low_num``.
    """
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    # Demo: generate one 1024-bit probable prime and re-verify it.
    num = generate_large_prime()
    print("Prime number:", num)
    print("is_prime_low_num:", is_prime_low_num(num))
| 169 | 1 |
"""Lazy import structure for the ELECTRA model family.

The heavy backend modules (PyTorch / TensorFlow / Flax) are only registered
when their dependencies are available, and only imported on first attribute
access via `_LazyModule`.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Always-available pieces: configuration and the slow tokenizer.
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy so submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 288 |
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    """ConfigTester variant that checks SegFormer-specific config attributes."""

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    """Builds tiny SegFormer configs and inputs, and runs shape/loss checks."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            # per-pixel segmentation labels
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # final feature map is downsampled by the last rate times 2
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        # single-label case uses BCE-style loss internally
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for the SegFormer family."""

    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # the bare model has no loss head
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def UpperCAmelCase__ ( ) -> Any:
    """Load the COCO test-fixture image used by the integration tests below.

    Fix: the original bound the image to ``A_`` but returned the undefined name ``image``.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
class A__ ( unittest.TestCase ):
    """Slow integration tests running real SegFormer checkpoints on a COCO fixture image.

    NOTE(review): all three methods share the mangled name ``snake_case_`` -- only the
    last survives class creation; restore distinct ``test_*`` names. Locals restored
    from their read sites (``A_`` assignments vs. real-name reads).
    """

    @slow
    def snake_case_ ( self ) -> Any:
        """Raw logits of the ADE20k-finetuned B0 checkpoint must match the reference slice."""
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=False , align=False , do_random_crop=False )
        model = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
            torch_device )
        image = prepare_img()
        encoded_inputs = image_processor(images=image , return_tensors="""pt""" )
        pixel_values = encoded_inputs.pixel_values.to(torch_device )
        with torch.no_grad():
            outputs = model(pixel_values )
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        # Reference values recorded from the original checkpoint.
        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , expected_slice , atol=1e-4 ) )

    @slow
    def snake_case_ ( self ) -> Optional[Any]:
        """Raw logits of the Cityscapes-finetuned B1 checkpoint must match the reference slice."""
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=False , align=False , do_random_crop=False )
        model = SegformerForSemanticSegmentation.from_pretrained(
            """nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(torch_device )
        image = prepare_img()
        encoded_inputs = image_processor(images=image , return_tensors="""pt""" )
        pixel_values = encoded_inputs.pixel_values.to(torch_device )
        with torch.no_grad():
            outputs = model(pixel_values )
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , expected_slice , atol=1e-1 ) )

    @slow
    def snake_case_ ( self ) -> Dict:
        """Post-processing must produce segmentation maps at the requested target size
        and, without target sizes, at the raw logits resolution (128x128)."""
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=False , align=False , do_random_crop=False )
        model = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
            torch_device )
        image = prepare_img()
        encoded_inputs = image_processor(images=image , return_tensors="""pt""" )
        pixel_values = encoded_inputs.pixel_values.to(torch_device )
        with torch.no_grad():
            outputs = model(pixel_values )
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(500, 300)] )
        expected_shape = torch.Size((500, 300) )
        self.assertEqual(segmentation[0].shape , expected_shape )
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((128, 128) )
        self.assertEqual(segmentation[0].shape , expected_shape )
| 288 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __lowercase ( unittest.TestCase):
    """Configuration holder used to build a MobileViT image processor in the tests.

    Fixes: the original ``__init__`` declared eleven parameters all named
    ``lowercase__`` (a SyntaxError) and bound attributes to a bare local instead of
    ``self``, so ``prepare_image_processor_dict`` read undefined attributes.
    """

    def __init__(self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_flip_channel_order=True , ):
        # Fall back to the processor's documented defaults when not overridden.
        size = size if size is not None else {"""shortest_edge""": 20}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self ):
        """Return the kwargs dict used to construct the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class __lowercase ( _UpperCAmelCase , unittest.TestCase):
    """Tests for the MobileViT image processor: properties, dict construction, and
    PIL / numpy / torch call paths.

    NOTE(review): ``MobileViTImageProcessingTester`` is not defined in this file --
    the tester class above lost that name during mangling; restore it there.
    Method/attribute names were restored to what sibling code reads
    (``setUp``, ``image_processor_dict``, ``image_processing_class``) and to
    ``test_*`` so unittest discovers them.
    """
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self ):
        self.image_processor_tester = MobileViTImageProcessingTester(self )

    @property
    def image_processor_dict(self ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size""" ) )
        self.assertTrue(hasattr(image_processing , """do_center_crop""" ) )
        self.assertTrue(hasattr(image_processing , """center_crop""" ) )
        self.assertTrue(hasattr(image_processing , """do_flip_channel_order""" ) )

    def test_image_processor_from_dict_with_kwargs(self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
        self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
        # Keyword overrides must win over the dict values.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
        self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )

    def test_batch_feature(self ):
        pass

    def test_call_pil(self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )

    def test_call_numpy(self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )

    def test_call_pytorch(self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
| 48 |
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
# Module-level logger; handlers/level are configured by the importing application.
a_ = logging.getLogger(__name__)
class __lowercase ( _UpperCAmelCase):
    """NER token-classification task over CoNLL-style, space-separated files.

    NOTE(review): method names were restored to the TokenClassificationTask API
    (the mangled names collided) and locals were restored from their read sites.
    """

    def __init__(self , label_idx=-1 ):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self , data_dir , mode ):
        """Parse ``{mode}.txt`` in ``data_dir`` into InputExample objects.

        Sentences are separated by blank lines or ``-DOCSTART-`` markers; each
        non-blank line is ``token label...`` with the label in column ``label_idx``.
        """
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , f'{mode}.txt' )
        guid_index = 1
        examples = []
        with open(file_path , encoding="""utf-8""" ) as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=words , labels=labels ) )
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(""" """ )
                    words.append(splits[0] )
                    if len(splits ) > 1:
                        labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("""O""" )
            # Flush the trailing sentence if the file does not end with a blank line.
            if words:
                examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=words , labels=labels ) )
        return examples

    def write_predictions_to_file(self , writer , test_input_reader , preds_list ):
        """Echo the test file to ``writer`` with each token paired to its prediction."""
        example_id = 0
        for line in test_input_reader:
            if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
                writer.write(line )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
                writer.write(output_line )
            else:
                # Tokens truncated away by the max sequence length have no prediction.
                logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )

    def get_labels(self , path ):
        """Read labels (one per line) from ``path``; default to the CoNLL-2003 tag set."""
        if path:
            with open(path , """r""" ) as f:
                labels = f.read().splitlines()
            # Guarantee the outside-entity tag is always present.
            if "O" not in labels:
                labels = ["""O"""] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __lowercase ( _UpperCAmelCase):
    """Chunking task: same file parsing as the NER task, but labels come from the
    second-to-last column, with the CoNLL-2000 chunk tag set as fallback.
    """

    def __init__(self ):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2 )

    def get_labels(self , path ):
        """Read chunk labels from ``path``; default to the standard CoNLL chunk tags.

        NOTE(review): restored the TokenClassificationTask method name and the
        ``labels`` local (the original bound the file contents to a mangled name).
        """
        if path:
            with open(path , """r""" ) as f:
                labels = f.read().splitlines()
            # Guarantee the outside-chunk tag is always present.
            if "O" not in labels:
                labels = ["""O"""] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class __lowercase ( _UpperCAmelCase):
    """Part-of-speech tagging task backed by CoNLL-U files (universal POS tags).

    NOTE(review): method names restored to the TokenClassificationTask API and
    locals restored from their read sites (the mangled names were never defined).
    """

    def read_examples_from_file(self , data_dir , mode ):
        """Parse ``{mode}.txt`` (CoNLL-U) into InputExample objects with UPOS labels."""
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , f'{mode}.txt' )
        guid_index = 1
        examples = []
        with open(file_path , encoding="""utf-8""" ) as f:
            for sentence in parse_incr(f ):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["""form"""] )
                    labels.append(token["""upos"""] )
                assert len(words ) == len(labels )
                if words:
                    examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=words , labels=labels ) )
                    guid_index += 1
        return examples

    def write_predictions_to_file(self , writer , test_input_reader , preds_list ):
        """Write one line per sentence: each token annotated as ``form (gold|pred)``."""
        example_id = 0
        for sentence in parse_incr(test_input_reader ):
            s_p = preds_list[example_id]
            out = """"""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
            out += "\n"
            writer.write(out )
            example_id += 1

    def get_labels(self , path ):
        """Read UPOS labels from ``path``; default to the 17-tag universal set."""
        if path:
            with open(path , """r""" ) as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 48 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class lowercase_ (lowercase__ ):
    """Map-style dataset that lazily applies a preprocessing callable to each item.

    Fixes: the original constructor declared three parameters all named
    ``lowercase_`` (a SyntaxError) and bound attributes to a bare local instead
    of ``self``.
    """

    def __init__( self , dataset , process , params) -> Union[str, Any]:
        self.dataset = dataset  # underlying map-style dataset
        self.process = process  # callable applied to every item on access
        self.params = params    # extra kwargs forwarded to `process`

    def __len__( self) -> List[str]:
        return len(self.dataset)

    def __getitem__( self , i) -> str:
        item = self.dataset[i]
        processed = self.process(item , **self.params)
        return processed
class lowercase_ (lowercase__ ):
    """Iterator that runs ``infer`` over items from ``loader`` and, when the loader
    is batched, unrolls each inferred batch back into individual items.

    NOTE(review): the two methods below were both mangled to ``__UpperCamelCase``;
    restored to ``loader_batch_item`` (called internally) and ``__next__``
    (required by the iterator protocol). Locals restored from their read sites.
    """

    def __init__( self , loader , infer , params , loader_batch_size=None) -> Optional[int]:
        self.loader = loader  # DataLoader (or any iterable) of inputs
        self.infer = infer    # callable applied to each loader item
        self.params = params  # extra kwargs forwarded to `infer`
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping for unrolling a batched output item by item.
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__( self) -> List[Any]:
        return len(self.loader)

    def __iter__( self) -> Dict:
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item( self) -> Any:
        """Return the current element of the unrolled batch, shaped as batch_size=1."""
        if isinstance(self._loader_batch_data , torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element , ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0] , torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0] , np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element , tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0] , torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0] , np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index] , torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index] , np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index] , 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__( self) -> Any:
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item , **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed , torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor , list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class lowercase_ (lowercase__ ):
    """Iterator that flattens sub-iterators: ``infer`` returns an iterator per input
    item, and this class yields its elements one by one across all inputs.

    Fixes: duplicate mangled parameter names (SyntaxError) and the ``__next__``
    method lost its name, breaking the iterator protocol.
    """

    def __init__( self , loader , infer , params , loader_batch_size=None) -> Optional[Any]:
        super().__init__(loader , infer , params)

    def __iter__( self) -> str:
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__( self) -> Tuple:
        if self.subiterator is None:
            # Start the first sub-iterator lazily on first access.
            self.subiterator = self.infer(next(self.iterator) , **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start lookig at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator) , **self.params)
            processed = next(self.subiterator)
        return processed
class lowercase_ (lowercase__ ):
    """Iterator that regroups flattened chunk items back into lists.

    Accumulates items (unbatching if needed) until one carries ``is_last=True``,
    then returns the accumulated list so that `process` and `postprocess` observe
    the same boundaries. NOTE(review): ``__next__`` name and locals restored
    (the mangled originals left ``is_last``/``accumulator``/``item`` unbound).
    """

    def __iter__( self) -> Dict:
        self.iterator = iter(self.loader)
        return self

    def __next__( self) -> list:
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # its a `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop('is_last')
                accumulator.append(item)
                if is_last:
                    return accumulator
        while not is_last:
            processed = self.infer(next(self.iterator) , **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed , torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor , list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop('is_last')
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop('is_last')
                accumulator.append(item)
        return accumulator
class lowercase_ (lowercase__ ):
    """Map-style view over a dataset, extracting a single key from each item.

    Fixes: duplicate mangled parameter names and attribute bindings restored.
    """

    def __init__( self , dataset , key) -> Union[str, Any]:
        self.dataset = dataset
        self.key = key

    def __len__( self) -> Optional[Any]:
        return len(self.dataset)

    def __getitem__( self , i) -> Any:
        return self.dataset[i][self.key]
class lowercase_ (lowercase__ ):
    """Map-style view pairing two keys of each item as ``text`` / ``text_pair``.

    Fixes: the constructor declared two parameters and two attributes under the
    same mangled name (``keya``), making the second key unreachable.
    """

    def __init__( self , dataset , key1 , key2) -> str:
        self.dataset = dataset
        self.key1 = key1  # key providing the `text` field
        self.key2 = key2  # key providing the `text_pair` field

    def __len__( self) -> Any:
        return len(self.dataset)

    def __getitem__( self , i) -> List[str]:
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 20 |
from __future__ import annotations

# Sentinel stored in the trie to mark the end of a complete word.
END = "#"
_lowerCAmelCase: str = END  # backward-compatible alias for the old constant name


class lowercase_ :
    """Prefix tree (trie) supporting word insertion and prefix-based autocompletion.

    Fixes: the module referenced ``END``, ``Trie``, ``trie``, ``words``,
    ``autocomplete_using_trie`` and ``main`` while the mangled definitions bound
    other names, so nothing here previously ran.
    """

    def __init__(self) -> None:
        # Nested dict-of-dicts; reaching END means a whole word terminates here.
        self._trie = {}

    def insert_word(self, text: str) -> None:
        """Insert `text` into the trie one character at a time."""
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        """Return every stored suffix under `prefix`, or [] if the prefix is absent."""
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        """Recursively collect every suffix reachable from sub-trie `d`."""
        result = []
        for c, v in d.items():
            # A completed word contributes a single trailing-space marker.
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


# Backward-compatible alias: the module code refers to the class as `Trie`.
Trie = lowercase_

trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    """Return every stored word completing `string` (each with a trailing space)."""
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    """Demo: print all completions of the prefix 'de'."""
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 20 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Make RNG and cuda/cuDNN behaviour deterministic so these pipeline tests are reproducible.
enable_full_determinism()
@skip_mps
class snake_case ( __UpperCAmelCase , unittest.TestCase ):
    """Fast CPU tests for TextToVideoSDPipeline built from tiny random components.

    NOTE(review): the mixin configuration attributes and method names below were
    restored -- the mangled versions all collided (every attribute was
    ``snake_case__``, every method ``__lowerCAmelCase``), so the mixin could not
    read its configuration and unittest discovered no tests.
    """
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ] )

    def get_dummy_components( self ):
        """Build tiny, seeded model components so the pipeline runs fast on CPU."""
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') ,up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') ,cross_attention_dim=32 ,attention_head_dim=4 ,)
        scheduler = DDIMScheduler(
            beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='scaled_linear' ,clip_sample=False ,set_alpha_to_one=False ,)
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,sample_size=128 ,)
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,hidden_act='gelu' ,projection_dim=512 ,)
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components

    def get_dummy_inputs( self ,device ,seed=0 ):
        """Return deterministic call kwargs for the pipeline on `device`."""
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'pt',
        }
        return inputs

    def test_text_to_video_default_case( self ):
        """Two-step generation on CPU must reproduce the recorded reference slice."""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['output_type'] = 'np'
        frames = sd_pipe(**inputs ).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def test_attention_slicing_forward_pass( self ):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False ,expected_max_diff=3e-3 )

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,)
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False ,expected_max_diff=1e-2 )

    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
    def test_inference_batch_consistent( self ):
        pass

    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
    def test_inference_batch_single_identical( self ):
        pass

    @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
    def test_num_images_per_prompt( self ):
        pass

    def test_progress_bar( self ):
        return super().test_progress_bar()
@slow
@skip_mps
class snake_case ( unittest.TestCase ):
    """Slow integration tests running the real damo-vilab text-to-video checkpoint.

    NOTE(review): mangled locals restored (results were bound to ``UpperCAmelCase__``
    but read under their real names, e.g. ``pipe``/``video``).
    """

    def __lowerCAmelCase ( self : Optional[Any] ):
        """Full 25-step generation must match the reference video within tolerance."""
        expected_video = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' )
        pipe = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe = pipe.to('cuda' )
        prompt = 'Spiderman is surfing'
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        video_frames = pipe(prompt ,generator=generator ,num_inference_steps=25 ,output_type='pt' ).frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2

    def __lowerCAmelCase ( self : Union[str, Any] ):
        """Two-step generation (default scheduler) must match its own reference video."""
        expected_video = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' )
        pipe = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
        pipe = pipe.to('cuda' )
        prompt = 'Spiderman is surfing'
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        video_frames = pipe(prompt ,generator=generator ,num_inference_steps=2 ,output_type='pt' ).frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
| 632 | """simple docstring"""
"""Convert free-form strings between Pascal, camel, snake and kebab case.

Fixes: all six functions were previously defined under the single colliding name
``a_`` and referenced helpers (``split_input``, ``to_simple_case``,
``to_complex_case``) and a parameter (``str_``) that were never defined.
"""
import re


def split_input(str_: str) -> list:
    """Split `str_` on punctuation, then split each fragment into words."""
    return [char.split() for char in re.split(r'[^ a-z A-Z 0-9 \s]' , str_ )]


def to_simple_case(str_: str) -> str:
    """Concatenate every word of `str_`, capitalised (PascalCase)."""
    string_split = split_input(str_ )
    return "".join(
        [''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    """Join the words of `text` with `separator`, upper- or lower-cased.

    Returns the literal string "not valid string" when indexing fails.
    """
    try:
        string_split = split_input(text )
        if upper:
            res_str = ''.join(
                [
                    separator.join([char.upper() for char in sub_str] )
                    for sub_str in string_split
                ] )
        else:
            res_str = ''.join(
                [
                    separator.join([char.lower() for char in sub_str] )
                    for sub_str in string_split
                ] )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    """PascalCase: every word capitalised, no separator."""
    return to_simple_case(text )


def to_camel_case(text: str) -> str:
    """camelCase: PascalCase with a lower-cased first letter."""
    try:
        res_str = to_simple_case(text )
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    """snake_case (SCREAMING_SNAKE_CASE when `upper` is true)."""
    return to_complex_case(text , upper , '_' )


def to_kebab_case(text: str, upper: bool) -> str:
    """kebab-case (upper-cased variant when `upper` is true)."""
    return to_complex_case(text , upper , '-' )


# Backward-compatible alias: after the previous colliding definitions the name
# `a_` ended up bound to the kebab-case converter.
a_ = to_kebab_case


if __name__ == "__main__":
    __import__('doctest').testmod()
| 632 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
# Module-level flag; appears unused in this file -- presumably a leftover debug/config
# toggle from the original test module (verify before removing).
_snake_case = False
class _lowerCAmelCase ( unittest.TestCase ):
    """Placeholder for fast VersatileDiffusion pipeline tests (none implemented yet)."""

    pass
@nightly
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
    """Nightly GPU integration tests for the VersatileDiffusion mega pipeline.

    NOTE(review): mangled locals restored (results bound to ``UpperCamelCase`` but
    read under their real names); ``torch.floataa`` restored to ``torch.float16``.
    """

    def __lowerCAmelCase ( self : List[Any] ):
        """Release GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __lowerCAmelCase ( self : Optional[Any] ):
        """Saving and reloading the pipeline must reproduce the exact same output."""
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        generator = torch.manual_seed(0 )
        image = pipe.dual_guided(
            prompt='first prompt' , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname , torch_dtype=torch.float16 )
            pipe.to(torch_device )
            pipe.set_progress_bar_config(disable=None )
        # Re-seed so the reloaded pipeline sees the same noise.
        generator = generator.manual_seed(0 )
        new_image = pipe.dual_guided(
            prompt='first prompt' , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
        assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"

    def __lowerCAmelCase ( self : Optional[int] ):
        """Dual-guided, text-to-image and image-variation outputs must match references."""
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'cyberpunk 2077'
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        generator = torch.manual_seed(0 )
        image = pipe.dual_guided(
            prompt=prompt , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0 )
        image = pipe.text_to_image(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
        image = pipe.image_variation(init_image , generator=generator , output_type='numpy' ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 282 |
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    """Builds a tiny Albert config plus random inputs and runs shape checks
    for each task head.

    The obfuscated original had duplicate parameter names (a SyntaxError),
    bound every local to one reused name, and never stored any constructor
    argument on ``self`` even though every method reads ``self.batch_size``
    etc.; the class name and method names are required by the call sites in
    the test class below (``AlbertModelTester(self)``,
    ``prepare_config_and_inputs`` ...).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return a config plus random ids/masks/labels for all task heads."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise all three call signatures; only the last result is checked.
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Duplicate each example once per choice: (batch, choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model-tester harness wired up for the Albert family.

    NOTE(review): the obfuscated original used undefined base-class names and
    bound every method to the same identifier (so only the last survived);
    mixin bases and ``test_*`` names restored so unittest discovery works.
    """

    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # presumably this boolean flag is `fx_compatible` as upstream -- TODO confirm.
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        # Pretraining models additionally need `labels` and `sentence_order_label`.
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    """Integration check against the published albert-base-v2 checkpoint."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        """The base model's hidden states on a fixed sentence must match a
        known slice (restored locals -- the original bound everything to one
        reused name and referenced undefined variables)."""
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 92 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding: map each submodule to the public names it exports.
# The original overwrote this dict with the modeling list and then passed an
# undefined `_import_structure` to _LazyModule (and dropped the
# `sys.modules[__name__] = ...` assignment entirely); all three are fixed.
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

# The modeling submodule is only importable when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 481 |
import re
def indian_phone_validator(phone: str) -> bool:
    """Return True if *phone* is a valid Indian mobile number.

    Accepts an optional "+91" (with optional space/hyphen), "0" or "91"
    prefix, followed by ten digits starting with 7, 8 or 9.
    """
    # The original searched using the phone number itself as the pattern
    # (re.search(phone, phone)), which raised re.error for "+91..." inputs
    # and never used the compiled pattern; match against the anchored
    # pattern instead.
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    return pat.search(phone) is not None


# Backward-compatible alias for the obfuscated name this function carried.
_lowerCAmelCase = indian_phone_validator

if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
| 481 | 1 |
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    """Print every permutation of *sequence*, one per line."""
    create_state_space_tree(sequence, [], 0, [0 for _ in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Depth-first walk of the permutation state-space tree.

    ``index_used[i]`` marks whether ``sequence[i]`` is already placed in
    ``current_sequence``; each complete branch (``index == len(sequence)``)
    is printed.  (The obfuscated original bound both functions to one name
    and called two undefined ones; real names restored from the call sites.)
    """
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            # Backtrack so sibling branches see a clean state.
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL.  (The original bound both the logger
# and this map to the same throwaway name, losing the logger.)
EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class EfficientFormerConfig(PretrainedConfig):
    """Configuration for EfficientFormer models.

    Stores architecture hyper-parameters (stage depths/widths, the final
    meta-3D attention stage, patch-embedding downsampling, regularisation
    and normalisation settings).  The obfuscated original inherited from an
    undefined name and discarded every constructor argument into a single
    throwaway variable; the ``self.*`` assignments are restored.
    """

    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 512 | 0 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    """T5-style decoder with FiLM conditioning on the diffusion timestep,
    used to predict spectrogram frames from note encodings.

    NOTE(review): parameter names restored from the ``self.config.*`` reads
    below and the upstream diffusers implementation; the obfuscated original
    had duplicate parameter names and never assigned any submodule to
    ``self``.
    """

    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()
        # Maps the timestep embedding to the FiLM conditioning vector.
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        # Positions are fixed, not learned.
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = TaLayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        """Outer product of the two 0/1 masks -> (batch, 1, q_len, k_len)."""
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        hidden = self.dropout(inputs)

        # decoder: no padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [
            (encoding, self.encoder_decoder_mask(decoder_mask, encoding_mask))
            for encoding, encoding_mask in encodings_and_masks
        ]

        # Cross-attend style: concatenate all encodings along the sequence axis.
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            hidden = lyr(
                hidden,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        hidden = self.decoder_norm(hidden)
        hidden = self.post_dropout(hidden)
        return self.spec_out(hidden)
class DecoderLayer(nn.Module):
    """One FiLM-conditioned T5 decoder block: conditional self-attention,
    cross-attention over the encoder states, then a conditional FF layer.
    (Name restored: the decoder above instantiates ``DecoderLayer``.)"""

    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )
        # FiLM cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask)

        if encoder_hidden_states is not None:
            # Convert the 0/1 mask to additive form: 0 where allowed, -1e10 where masked.
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )
            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply FiLM conditional feed-forward layer.
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    """Pre-norm self-attention with optional FiLM conditioning and a residual
    connection.  (Name restored: DecoderLayer instantiates this class.)"""

    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block with residual add.
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class TaLayerCrossAttention(nn.Module):
    """Pre-norm cross-attention over encoder states with a residual
    connection.  (Name restored: DecoderLayer instantiates this class.)"""

    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        # attention_mask arrives as (batch, 1, q_len, k_len); Attention expects
        # it without the extra broadcast dim, hence the squeeze.
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    """Pre-norm gated feed-forward layer with optional FiLM conditioning and
    a residual connection.  (Name restored: DecoderLayer instantiates it.)"""

    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    """T5 gated-GELU feed-forward: two parallel input projections (one gated
    through GELU), multiplied, then projected back to d_model.

    The obfuscated original created two linears but applied the same one
    (``wi_a``) twice, making the second projection dead weight; restored the
    distinct ``wi_0``/``wi_1`` pair.
    """

    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    """T5-style RMS layer norm: scale only, no mean subtraction, no bias.

    The obfuscated original referenced the non-existent ``torch.floataa``
    dtypes; restored to float32 accumulation with a half-precision
    down-cast, matching T5.
    """

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer norm which only scales and doesn't shift; variance is
        # computed in float32 for numerical stability.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # Convert back into half precision if the weights are fp16/bf16.
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    """Tanh approximation of GELU as used in Google BERT/GPT-2:
    0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).
    (Name restored: TaDenseGatedActDense instantiates this class.)"""

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    """FiLM (feature-wise linear modulation): projects the conditioning
    embedding to per-feature scale and shift, then applies
    ``x * (1 + scale) + shift``.  (Name restored: the attention/FF layers
    above instantiate this class.)"""

    def __init__(self, in_features, out_features):
        super().__init__()
        # One projection producing both halves: [scale | shift].
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
| 701 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

# Constants below are read by the hub helpers in this module; the original
# bound all six to one name, each assignment clobbering the last.
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
SESSION_ID = uuida().hex
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def lowercase__(user_agent=None) -> str:
    """Build the HTTP user-agent string used for hub requests / telemetry.

    Args:
        user_agent: optional extra info to append; a dict is rendered as
            ``k/v`` pairs, a plain string is appended verbatim.

    Returns:
        The formatted user-agent string.
    """
    ua = F'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += F'; torch/{_torch_version}'
    if is_flax_available():
        ua += F'; jax/{_jax_version}'
        ua += F'; flax/{_flax_version}'
    if is_onnx_available():
        ua += F'; onnxruntime/{_onnxruntime_version}'
    # CI will set this value to True
    if os.environ.get('DIFFUSERS_IS_CI', '').upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    # Fix: the original tested `isinstance(x, x)` (TypeError) and referenced
    # an undefined `user_agent` name; restore the intended dict/str dispatch.
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(F'{k}/{v}' for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id, organization=None, token=None) -> str:
    """Return the fully-qualified hub repo name for *model_id*.

    Uses *organization* when given, otherwise resolves the username of the
    authenticated user (via *token*, falling back to the stored HfFolder
    token). Restored from an obfuscated version whose duplicate parameter
    names were a SyntaxError; the name matches the existing call site in
    `create_model_card`.
    """
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)['name']
        return F'{username}/{model_id}'
    else:
        return F'{organization}/{model_id}'
def lowercase__(args, model_name) -> None:
    """Render and save a model card (README.md) for a training run.

    Args:
        args: a training-arguments namespace; only attributes that exist are
            used (hence the `hasattr` guards).
        model_name: name of the model, used to build the hub repo name.

    Raises:
        ValueError: if Jinja is not installed (required for card rendering).

    Note: the original emitted the `adam_betaa` keyword twice, a SyntaxError;
    restored to `adam_beta1` / `adam_beta2`.
    """
    if not is_jinja_available():
        raise ValueError(
            'Modelcard rendering is based on Jinja templates.'
            ' Please make sure to have `jinja` installed before using `create_model_card`.'
            ' To install it, please run `pip install Jinja2`.')
    # Only the main process (rank -1 or 0) writes the card.
    if hasattr(args, 'local_rank') and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, 'hub_token') else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language='en',
            license='apache-2.0',
            library_name='diffusers',
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=Path(__file__).parent / 'model_card_template.md',
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, 'dataset_name') else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, 'gradient_accumulation_steps') else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, 'adam_beta1') else None,
        adam_beta2=args.adam_beta2 if hasattr(args, 'adam_beta2') else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, 'adam_weight_decay') else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, 'adam_epsilon') else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, 'lr_scheduler') else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, 'lr_warmup_steps') else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, 'ema_inv_gamma') else None,
        ema_power=args.ema_power if hasattr(args, 'ema_power') else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, 'ema_max_decay') else None,
        mixed_precision=args.mixed_precision,
    )
    card_path = os.path.join(args.output_dir, 'README.md')
    model_card.save(card_path)
def lowercase__(resolved_file, commit_hash=None):
    """Extract the commit hash from a resolved cache snapshot path.

    Args:
        resolved_file: path of a file inside a ``snapshots/<hash>/`` folder,
            or None.
        commit_hash: pre-known hash; returned unchanged when provided.

    Returns:
        The commit hash string, or None if it cannot be determined.
    """
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r'snapshots/([^/]+)/', resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    # Only accept strings that look like a real git commit hash.
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
# (Restored names: these were bound to a throwaway identifier while later
# code reads `hf_cache_home` / `old_diffusers_cache`.)
hf_cache_home = os.path.expanduser(
    os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
old_diffusers_cache = os.path.join(hf_cache_home, '''diffusers''')
def move_cache(old_cache_dir=None, new_cache_dir=None) -> None:
    """Move cached blob files from the old diffusers cache to the new one.

    Every regular (non-symlink) file under ``**/blobs/`` in *old_cache_dir*
    is moved to the same relative path under *new_cache_dir*; a symlink is
    left behind so older diffusers versions keep working. Defaults:
    *new_cache_dir* -> DIFFUSERS_CACHE, *old_cache_dir* -> old_diffusers_cache.
    (Name restored to `move_cache`, which the migration code below calls.)
    """
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob('**/blobs/*'):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.')
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
# One-time cache migration, tracked via a version marker file.
# (Restored names: the marker path and version were bound to a throwaway
# identifier while the code below reads `cache_version_file`/`cache_version`.)
cache_version_file = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            # Corrupt/empty marker file: treat as un-migrated.
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            '''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
            '''existing cached models. This is a one-time operation, you can interrupt it or run it '''
            '''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
        )
        try:
            move_cache()
        except Exception as e:
            trace = '''\n'''.join(traceback.format_tb(e.__traceback__))
            logger.error(
                F'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '
                '''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
                '''message and we will do our best to help.'''
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, '''w''') as f:
            f.write('''1''')
    except Exception:
        logger.warning(
            F'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
            '''the directory exists and can be written to.'''
        )
def lowercase__ ( _UpperCamelCase , _UpperCamelCase = None) -> str:
"""simple docstring"""
if variant is not None:
UpperCamelCase = weights_name.split('.')
UpperCamelCase = splits[:-1] + [variant] + splits[-1:]
UpperCamelCase = '.'.join(_UpperCamelCase)
return weights_name
def lowercase__(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    """Resolve a model weights file from a local path/dir or the hub.

    Resolution order: exact local file -> file inside a local directory
    (optionally under *subfolder*) -> download via `hf_hub_download`,
    with a deprecation shim for variants that were historically loaded
    through `revision`.

    Returns:
        The local filesystem path of the weights file.

    Raises:
        EnvironmentError: with a targeted message for every failure mode
        (missing file, bad repo/revision, connectivity, ...).

    Note: the original declared ten keyword-only parameters all with the
    same name (a SyntaxError) while the body referenced the real names;
    the signature is restored accordingly.
    """
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                F'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.')
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse('0.20.0')
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    F'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.',
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    F'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}\' so that the correct variant file can be added.',
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                F'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '
                'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
                'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
                'login`.')
        except RevisionNotFoundError:
            raise EnvironmentError(
                F'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '
                'this model name. Check the model page at '
                F'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.')
        except EntryNotFoundError:
            raise EnvironmentError(
                F'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.')
        except HTTPError as err:
            raise EnvironmentError(
                F'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}')
        except ValueError:
            raise EnvironmentError(
                F'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'
                F' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'
                F' directory containing a file named {weights_name} or'
                ' \nCheckout your internet connection or see how to run the library in'
                ' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.')
        except EnvironmentError:
            raise EnvironmentError(
                F'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '
                '\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
                F'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '
                F'containing a file named {weights_name}')
| 410 | 0 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
# Emit INFO-level progress messages during conversion.
logging.set_verbosity_info()
a_ = logging.get_logger()  # module-level logger (transformers logging wrapper)
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    """Convert one timm LeViT checkpoint to the HF format and optionally save it.

    Args:
        hidden_sizes: first hidden size of the variant (selects the timm model).
        name: checkpoint name, e.g. "levit-128S" (trailing "S" picks levit_128s).
        config: LevitConfig describing the target architecture.
        save_directory: output directory root.
        push_to_hub: when True, save model + image processor under
            ``save_directory / name``.

    Note: restored from an obfuscated version whose five parameters shared one
    name (a SyntaxError) and which dropped the og-key -> new-key weight
    remapping dict; the function name matches the existing call sites.
    """
    print(F"Converting {name}...")

    with torch.no_grad():
        # Pick the matching pretrained timm model.
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('levit_128s', pretrained=True)
            else:
                from_model = timm.create_model('levit_128', pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model('levit_192', pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model('levit_256', pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model('levit_384', pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        # Map the timm state dict onto the HF parameter names positionally.
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        # Sanity check: both models must produce identical logits.
        x = torch.randn((2, 3, 224, 224))
        from_model_logits = from_model(x)
        our_model_logits = our_model(x).logits

    assert torch.allclose(from_model_logits, our_model_logits), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(F"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Build LeViT configs (with ImageNet labels) and convert checkpoints.

    Args:
        save_directory: where converted checkpoints are written.
        model_name: single variant to convert (e.g. "levit-192"); converts
            all variants when None.
        push_to_hub: forwarded to `convert_weight_and_push`.

    Returns:
        (config, expected_shape) for the last converted model.

    Note: restored from an obfuscated version with duplicate parameter names
    and a `partial` bound to the wrong target; `config` is now also bound on
    the single-model branch so the final return cannot raise NameError.
    """
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)

    # Fetch the ImageNet-1k id -> label mapping shared by every config.
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    # Pre-bind the label metadata so each variant only supplies architecture args.
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        'levit-128S': 128,
        'levit-128': 128,
        'levit-192': 192,
        'levit-256': 256,
        'levit-384': 384,
    }

    names_to_config = {
        'levit-128S': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        'levit-128': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        'levit-192': ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        'levit-256': ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        'levit-384': ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1, ),
    }

    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    # Restored names: the parser/args were bound to a throwaway identifier
    # while the code below read `parser`, `args` and `pytorch_dump_folder_path`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default=None,
        type=str,
        help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='levit-dump-folder/',
        type=Path,
        required=False,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    parser.add_argument(
        '--no-push_to_hub',
        dest='push_to_hub',
        action='store_false',
        help='Do not push model and image processor to the hub',
    )

    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

# Lazy import structure for the BLIP model family. Configuration and the
# processor are always importable; vision/torch/TF submodules are attached
# only when the corresponding optional dependency is available.
# (Restored names: everything was bound to `a_` while `_LazyModule` below
# reads `_import_structure`, which was never defined.)
_import_structure = {
    'configuration_blip': [
        'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'BlipConfig',
        'BlipTextConfig',
        'BlipVisionConfig',
    ],
    'processing_blip': ['BlipProcessor'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['image_processing_blip'] = ['BlipImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_blip'] = [
        'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BlipModel',
        'BlipPreTrainedModel',
        'BlipForConditionalGeneration',
        'BlipForQuestionAnswering',
        'BlipVisionModel',
        'BlipTextModel',
        'BlipForImageTextRetrieval',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_blip'] = [
        'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFBlipModel',
        'TFBlipPreTrainedModel',
        'TFBlipForConditionalGeneration',
        'TFBlipForQuestionAnswering',
        'TFBlipVisionModel',
        'TFBlipTextModel',
        'TFBlipForImageTextRetrieval',
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 417 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    """Helper that holds the kwargs used to build a MobileNetV1 image processor.

    Restored from an obfuscated version: the class name matches the existing
    reference in the image-processing test's `setUp`, and the constructor
    arguments are stored on `self` again (they were bound to a throwaway
    local, so `prepare_image_processor_dict` raised AttributeError).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=4_00,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"""shortest_edge""": 20}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        """Return the kwargs dict for constructing the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for `MobileNetVaImageProcessor` over PIL, numpy and torch inputs.

    Restored from an obfuscated version: the mixin base was an undefined name
    (`_lowerCamelCase`) — `ImageProcessingSavingTestMixin` is imported at the
    top of this file — and the class attribute / test method names are
    restored so unittest discovery and the mixin can find them.
    """

    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, """do_resize"""))
        self.assertTrue(hasattr(image_processing, """size"""))
        self.assertTrue(hasattr(image_processing, """do_center_crop"""))
        self.assertTrue(hasattr(image_processing, """crop_size"""))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"""shortest_edge""": 20})
        self.assertEqual(image_processor.crop_size, {"""height""": 18, """width""": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"""shortest_edge""": 42})
        self.assertEqual(image_processor.crop_size, {"""height""": 84, """width""": 84})

    def test_batch_feature(self):
        # Batched behavior is covered by the per-input-type tests below.
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ), )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ), )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ), )
| 500 |
"""simple docstring"""
def longest_distance(graph):
    """Print and return the number of vertices on the longest path in a DAG.

    Args:
        graph: adjacency list mapping each vertex (0..len(graph)-1) to its
            successors. Must be acyclic.

    Returns:
        The maximum path length in vertices (added, backward-compatible:
        callers that only relied on the printed output are unaffected).

    Implementation: Kahn's topological order over indegrees, relaxing
    `long_dist` as vertices are dequeued. Restored from an obfuscated
    version that bound indegree/queue/long_dist to one name and enqueued
    the whole graph instead of the vertex; the name matches the existing
    call site below.
    """
    from collections import deque

    indegree = [0] * len(graph)
    long_dist = [1] * len(graph)
    queue = deque()  # deque: O(1) pops from the left (list.pop(0) is O(n))

    for successors in graph.values():
        for v in successors:
            indegree[v] += 1

    for v in range(len(graph)):
        if indegree[v] == 0:
            queue.append(v)

    while queue:
        vertex = queue.popleft()
        for nxt in graph[vertex]:
            indegree[nxt] -= 1
            if long_dist[vertex] + 1 > long_dist[nxt]:
                long_dist[nxt] = long_dist[vertex] + 1
            if indegree[nxt] == 0:
                queue.append(nxt)

    result = max(long_dist)
    print(result)
    return result
# Adjacency list of Graph
SCREAMING_SNAKE_CASE_ : Any = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 500 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.