"""Raise base to the power of exponent using recursion."""


def power(base: int, exponent: int) -> float:
    """Recursively compute base ** exponent for a non-negative exponent."""
    return base * power(base, exponent - 1) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
import math


def fx(x: float, a: float) -> float:
    """f(x) = x^2 - a; its positive root is sqrt(a)."""
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    """Derivative of f: f'(x) = 2x."""
    return 2 * x


def get_initial_point(a: float) -> float:
    """Pick a starting point at or above sqrt(a) by repeated squaring."""
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 1e-14) -> float:
    """Approximate sqrt(a) with the Newton-Raphson method."""
    if a < 0:
        raise ValueError("math domain error")
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
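# Illustrative calls for square_root_iterative() above; added as a sketch,
# not part of the original snippet.
print(square_root_iterative(25))  # converges to 5.0
print(square_root_iterative(2))   # approximately 1.41421356...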
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
if a == 0:
raise ValueError("Coefficient 'a' must not be zero." )
UpperCAmelCase_ = b * b - 4 * a * c
UpperCAmelCase_ = (-b + sqrt(lowerCAmelCase__ )) / (2 * a)
UpperCAmelCase_ = (-b - sqrt(lowerCAmelCase__ )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def a__ ( ):
UpperCAmelCase_ , UpperCAmelCase_ = quadratic_roots(a=5 , b=6 , c=1 )
print(f"""The solutions are: {solutiona} and {solutiona}""" )
if __name__ == "__main__":
main()
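# Illustrative calls for quadratic_roots() above (added, not original):
print(quadratic_roots(a=1, b=-3, c=2))  # (2.0, 1.0), two real roots
print(quadratic_roots(a=1, b=0, c=1))   # a conjugate pair, roughly +1j and -1j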
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''rwkv'''
UpperCamelCase = {'''max_position_embeddings''': '''context_length'''}
def __init__( self : str , _UpperCAmelCase : int=50277 , _UpperCAmelCase : Optional[Any]=1024 , _UpperCAmelCase : str=4096 , _UpperCAmelCase : Any=32 , _UpperCAmelCase : Any=None , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Tuple=1e-5 , _UpperCAmelCase : str=0 , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Union[str, Any]=6 , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Any=True , **_UpperCAmelCase : Optional[Any] , ) -> str:
'''simple docstring'''
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = context_length
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = attention_hidden_size if attention_hidden_size is not None else hidden_size
UpperCAmelCase_ = intermediate_size if intermediate_size is not None else 4 * hidden_size
UpperCAmelCase_ = layer_norm_epsilon
UpperCAmelCase_ = rescale_every
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = bos_token_id
UpperCAmelCase_ = eos_token_id
super().__init__(
tie_word_embeddings=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
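# Illustrative instantiation (added): shows the derived defaults. Assumes the
# class is importable as in transformers; not part of the original file.
config = RwkvConfig(hidden_size=768, num_hidden_layers=12)
print(config.attention_hidden_size)  # 768, falls back to hidden_size
print(config.intermediate_size)      # 3072, falls back to 4 * hidden_size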
def optimal_merge_pattern(files: list) -> float:
    """Return the minimum total cost of merging all the given files."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
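# Illustrative call (added): merging [2, 3, 4] costs 2+3=5 first, then
# 5+4=9, for a total of 14.
print(optimal_merge_pattern([2, 3, 4]))  # 14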
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for ResNet models."""

    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
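# Illustrative instantiation (added, not original); assumes the class is
# importable as in transformers.
config = ResNetConfig(depths=[2, 2, 2, 2], layer_type="basic")
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']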
"""A singly linked list supporting insertion, deletion, indexing and reversal."""
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    # Exercise the list with heterogeneous data types.
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.2,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
"""Tests for FAISS and Elasticsearch dataset indexes."""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import numpy as np
import pytest

from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex

from .utils import require_elasticsearch, require_faiss


pytestmark = pytest.mark.integration


@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self):
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")


@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)


@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1


@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> Any:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> List[str]:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> List[Any]:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> Any:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> int:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Dict:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Dict:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> int:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> List[Any]:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Any:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Any:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> Optional[Any]:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> str:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""torch"""] )
def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(lowercase_ , ["""torch"""] )
def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(lowercase_ , ["""torch"""] )
def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
requires_backends(lowercase_ , ["""torch"""] )
def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(lowercase_ , ["""torch"""] )
def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(lowercase_ , ["""torch"""] )
def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
requires_backends(lowercase_ , ["""torch"""] )
def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(lowercase_ , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> Dict:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> Any:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Dict:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> List[str]:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> int:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Dict:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> str:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Any:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Any:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> List[str]:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> List[Any]:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> List[Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> List[Any]:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> Dict:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> str:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> Dict:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Dict:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> str:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> Optional[Any]:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Dict:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> Any:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> List[Any]:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> List[Any]:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> Any:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> int:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> int:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> str:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> List[Any]:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> List[Any]:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> List[str]:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> List[Any]:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[Any]:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> List[str]:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Dict:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> Tuple:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Dict:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> Dict:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Dict:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> Tuple:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> Dict:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> Any:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> List[Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Dict:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> List[Any]:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> List[Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> List[str]:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Any:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> Any:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Any:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Optional[Any]:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> Tuple:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Dict:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> Optional[Any]:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Any:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> List[Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> List[Any]:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""torch"""] )
class lowercase ( metaclass=SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE = ['torch']
def __init__( self , *lowercase , **lowercase ) -> List[Any]:
requires_backends(self , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""torch"""] )
@classmethod
def _snake_case ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""torch"""] )
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
snake_case_ = logging.get_logger(__name__) # pylint: disable=invalid-name
snake_case_ = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_=8 ):
UpperCAmelCase = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def _lowerCAmelCase ( lowercase_ , lowercase_=512 , lowercase_=512 ):
UpperCAmelCase = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
UpperCAmelCase = np.array(pil_image.convert('RGB' ) )
UpperCAmelCase = arr.astype(np.floataa ) / 1_2_7.5 - 1
UpperCAmelCase = np.transpose(lowercase_ , [2, 0, 1] )
UpperCAmelCase = torch.from_numpy(lowercase_ ).unsqueeze(0 )
return image
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self :Dict , lowercase_ :UNetaDConditionModel , lowercase_ :DDPMScheduler , lowercase_ :VQModel , ) -> List[str]:
super().__init__()
self.register_modules(
unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , )
UpperCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Optional[Any] , lowercase_ :Tuple , lowercase_ :Any ) -> Optional[int]:
# get the original timestep using init_timestep
UpperCAmelCase = min(int(num_inference_steps * strength ) , lowercase_ )
UpperCAmelCase = max(num_inference_steps - init_timestep , 0 )
UpperCAmelCase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Dict , lowercase_ :str , lowercase_ :Optional[Any] , lowercase_ :Union[str, Any] , lowercase_ :List[Any] , lowercase_ :Optional[Any] , lowercase_ :Any=None ) -> Any:
if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}""" )
UpperCAmelCase = image.to(device=lowercase_ , dtype=lowercase_ )
UpperCAmelCase = batch_size * num_images_per_prompt
if image.shape[1] == 4:
UpperCAmelCase = image
else:
if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowercase_ )
]
UpperCAmelCase = torch.cat(lowercase_ , dim=0 )
else:
UpperCAmelCase = self.movq.encode(lowercase_ ).latent_dist.sample(lowercase_ )
UpperCAmelCase = self.movq.config.scaling_factor * init_latents
UpperCAmelCase = torch.cat([init_latents] , dim=0 )
UpperCAmelCase = init_latents.shape
UpperCAmelCase = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
# get latents
UpperCAmelCase = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase = init_latents
return latents
def UpperCAmelCase__ ( self :int , lowercase_ :int=0 ) -> List[str]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase = torch.device(f"""cuda:{gpu_id}""" )
UpperCAmelCase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :str=0 ) -> Dict:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
UpperCAmelCase = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=lowercase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase , UpperCAmelCase = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ )
# We'll offload the last model manually.
UpperCAmelCase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase__ ( self :List[Any] ) -> Dict:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase_ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase_ )
def __call__(
    self,
    image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
    image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
    negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
    height: int = 512,
    width: int = 512,
    num_inference_steps: int = 100,
    guidance_scale: float = 4.0,
    strength: float = 0.3,
    num_images_per_prompt: int = 1,
    generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
    output_type: Optional[str] = "pil",
    return_dict: bool = True,
):
    device = self._execution_device
    do_classifier_free_guidance = guidance_scale > 1.0
    if isinstance(image_embeds, list):
        image_embeds = torch.cat(image_embeds, dim=0)
    batch_size = image_embeds.shape[0]
    if isinstance(negative_image_embeds, list):
        negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
    if do_classifier_free_guidance:
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
    if not isinstance(image, list):
        image = [image]
    if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
        raise ValueError(
            f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor")
    image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
    image = image.to(dtype=image_embeds.dtype, device=device)
    latents = self.movq.encode(image)["latents"]
    latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
    self.scheduler.set_timesteps(num_inference_steps, device=device)
    timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
    latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
    height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
    latents = self.prepare_latents(
        latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator)
    for i, t in enumerate(self.progress_bar(timesteps)):
        # expand the latents if we are doing classifier free guidance
        latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
        added_cond_kwargs = {"image_embeds": image_embeds}
        noise_pred = self.unet(
            sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0]
        if do_classifier_free_guidance:
            noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
            noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
            _, variance_pred_text = variance_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
        if not (
            hasattr(self.scheduler.config, "variance_type")
            and self.scheduler.config.variance_type in ["learned", "learned_range"]
        ):
            noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
        # compute the previous noisy sample x_t -> x_t-1
        latents = self.scheduler.step(
            noise_pred, t, latents, generator=generator)[0]
    # post-processing
    image = self.movq.decode(latents, force_not_quantize=True)["sample"]
    if output_type not in ["pt", "np", "pil"]:
        raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}")
    if output_type in ["np", "pil"]:
        image = image * 0.5 + 0.5
        image = image.clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
    if output_type == "pil":
        image = self.numpy_to_pil(image)
    if not return_dict:
        return (image,)
    return ImagePipelineOutput(images=image)
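# A minimal, self-contained sketch (illustrative values only, not part of the pipeline) of the
# classifier-free guidance combination used in the denoising loop above.
import torch

noise_pred_uncond = torch.tensor([0.1, 0.2])
noise_pred_text = torch.tensor([0.3, 0.0])
guidance_scale = 4.0
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# guided == tensor([0.9000, -0.6000]); a guidance_scale > 1 pushes the prediction toward the conditioned branch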
| 78
| 0
|
def solution(n: int = 1000) -> int:
    """Sum 2 * a * ((a - 1) // 2) over a in [3, n]."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
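# Worked example (plain arithmetic): for n = 5 the terms are 2*3*1 = 6, 2*4*1 = 8 and
# 2*5*2 = 20, so solution(5) == 34.
assert solution(5) == 34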
| 137
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True.")
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images
        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")
        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
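# Usage sketch (hypothetical checkpoint id and image variable; OCR relies on the image
# processor's `apply_ocr=True` default and its Tesseract dependency):
#
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(images=pil_image, return_tensors="pt")  # words/boxes come from OCR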
| 137
| 1
|
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """Mark the function with the key code so it can be handled in the register."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """Mark the function with the key codes so it can be handled in the register."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    """Metaclass that collects the marked key handlers of a class into `key_handler`."""

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
            setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Find and call the handler registered for the pressed key, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Rebuild the class through the KeyHandler metaclass so its marked methods are wired up."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
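# A minimal usage sketch (hypothetical Menu class): `register` rebuilds the class through the
# KeyHandler metaclass, which gathers every marked method below into `Menu.key_handler`.
@register
class Menu:
    @mark("k")
    def move_up(cls):
        return "up"

    @mark_multiple("j", "\n")
    def move_down(cls):
        return "down"


# Menu.key_handler now maps each marked key to its handler function.
assert set(Menu.key_handler) == {"k", "j", "\n"}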
| 64
|
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])
    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))
    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))
    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
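# Quick sanity check of the expected-size arithmetic above (pure Python, no distributed setup):
# 12 examples over 5 ranks split as [3, 3, 2, 2, 2].
sizes = [12 // 5 + int(rank < 12 % 5) for rank in range(5)]
assert sizes == [3, 3, 2, 2, 2] and sum(sizes) == 12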
| 198
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
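# Usage sketch: with the lazy mapping above, `import transformers` stays cheap and each
# submodule loads on first attribute access, e.g. (from user code, not from inside this file):
#
#   from transformers import FNetConfig   # resolves through the _LazyModule indirection
#   config = FNetConfig()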
| 361
|
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """Depth-first traversal that consumes each undirected edge once and records the path."""
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    """Return 1 for an Euler circuit, 2 for an Euler path (with its odd node), 3 otherwise."""
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
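# Worked example: the triangle below (same as g4 above) has all even degrees, so check_euler
# reports an Euler cycle and the traversal printed from node 1 is [1, 2, 3, 1].
check_euler({1: [2, 3], 2: [1, 3], 3: [1, 2]}, 10)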
| 71
| 0
|
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """Yield successive `size`-length tuples from `seq`."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Uppercase, strip non-letters, split repeated letters with X and pad to even length."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably, so the 25-letter alphabet omits J
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
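# Round-trip check: decode(encode(x, k), k) returns the *prepared* plaintext (uppercased,
# X-padded). Note the 25-letter table has no J, so inputs containing J are unsupported as written.
assert decode(encode("Hello World", "playfair"), "playfair") == prepare_input("Hello World")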
| 47
|
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend(" if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")
        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):")
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")
        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n")
        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n'
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 339
| 0
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite")(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)


def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}")
    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
        return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}") from e
| 103
|
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20
        scores = self._get_uniform_logits(batch_size=2, length=length)
        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch
        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)
        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)
        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)
        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))
        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())
        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size
        top_k_warp = FlaxTopKLogitsWarper(3)
        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])
        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)
        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)
        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))
        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))
        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )
        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0
        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)
        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])

    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])
        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0
        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero
        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5
        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero
        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        cur_len = 10
        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)
        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
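# The temperature warper tested above is just logits / temperature; a quick numeric check
# (requires jax): a temperature below 1 sharpens the softmax distribution.
import jax
import jax.numpy as jnp

sharp = jax.nn.softmax(jnp.array([1.0, 2.0]) / 0.5)
base = jax.nn.softmax(jnp.array([1.0, 2.0]))
assert sharp[1] > base[1]  # the peak gains probability mass at low temperature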
| 103
| 1
|
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory used by the argparse `func` default below to instantiate the command."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name)


IMPORT_ERROR_MESSAGE = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder.")
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output.")
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, model_type, tf_checkpoint, pytorch_dump_output, config, finetuning_task_name, *args):
        self._logger = logging.get_logger("transformers-cli/converting")
        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file)
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name)
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]")
| 84
|
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Converts number to a string to iterate on its digits and sums their factorials.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0")
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}
    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
| 0
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""moussaKam/mbarthez""": 1_024,
"""moussaKam/barthez""": 1_024,
"""moussaKam/barthez-orangesum-title""": 1_024,
}
lowerCamelCase = """▁"""
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
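# Illustration of the pair layout produced by build_inputs_with_special_tokens above, using
# hypothetical token ids cls=0 and sep=2: <s> A </s> </s> B </s>
cls_id, sep_id = [0], [2]
assert cls_id + [5, 6] + sep_id + sep_id + [7] + sep_id == [0, 5, 6, 2, 2, 7, 2]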
| 241
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''roberta'''
def __init__( self : int , _UpperCAmelCase : List[Any]=50265 , _UpperCAmelCase : str=768 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : int=12 , _UpperCAmelCase : Tuple=3072 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Optional[int]=512 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : Optional[int]=0.02 , _UpperCAmelCase : Optional[Any]=1e-12 , _UpperCAmelCase : Dict=1 , _UpperCAmelCase : str=0 , _UpperCAmelCase : int=2 , _UpperCAmelCase : Tuple="absolute" , _UpperCAmelCase : Any=True , _UpperCAmelCase : int=None , **_UpperCAmelCase : List[str] , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = classifier_dropout
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def lowercase__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
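# Hedged sketch of how the dynamic-axes mapping above is typically consumed when
# exporting to ONNX. The model, dummy inputs, path, and opset below are
# illustrative assumptions, not part of the original file.
import torch

def export_with_dynamic_axes(model, dummy_inputs, onnx_path="model.onnx"):
    dynamic_axes = {
        "input_ids": {0: "batch", 1: "sequence"},
        "attention_mask": {0: "batch", 1: "sequence"},
    }
    torch.onnx.export(
        model,
        (dummy_inputs["input_ids"], dummy_inputs["attention_mask"]),
        onnx_path,
        input_names=["input_ids", "attention_mask"],
        dynamic_axes=dynamic_axes,  # lets batch and sequence length vary at inference time
        opset_version=14,
    )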
| 241
| 1
|
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Optional[int] = BertJapaneseTokenizer
_UpperCAmelCase : Any = False
_UpperCAmelCase : Dict = True
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
super().setUp()
SCREAMING_SNAKE_CASE_: Dict = [
"[UNK]",
"[CLS]",
"[SEP]",
"こんにちは",
"こん",
"にちは",
"ばんは",
"##こん",
"##にちは",
"##ばんは",
"世界",
"##世界",
"、",
"##、",
"。",
"##。",
]
SCREAMING_SNAKE_CASE_: Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Optional[int]):
SCREAMING_SNAKE_CASE_: Optional[int] = "こんにちは、世界。 \nこんばんは、世界。"
SCREAMING_SNAKE_CASE_: Dict = "こんにちは 、 世界 。 こんばんは 、 世界 。"
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : List[str]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = self.get_input_output_texts(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.decode(lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__)
return text, ids
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
pass # TODO add if relevant
def _SCREAMING_SNAKE_CASE ( self : Dict):
pass # TODO add if relevant
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
pass # TODO add if relevant
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: int = self.tokenizer_class(self.vocab_file)
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
self.assertListEqual(lowerCAmelCase__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: int = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab")
self.assertIsNotNone(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = "こんにちは、世界。\nこんばんは、世界。"
SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
SCREAMING_SNAKE_CASE_: int = os.path.join(self.tmpdirname , "tokenizer.bin")
with open(lowerCAmelCase__ , "wb") as handle:
pickle.dump(lowerCAmelCase__ , lowerCAmelCase__)
with open(lowerCAmelCase__ , "rb") as handle:
SCREAMING_SNAKE_CASE_: List[Any] = pickle.load(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = tokenizer_new.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = MecabTokenizer(mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
try:
SCREAMING_SNAKE_CASE_: List[str] = MecabTokenizer(mecab_dic="unidic_lite")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _SCREAMING_SNAKE_CASE ( self : str):
try:
SCREAMING_SNAKE_CASE_: Tuple = MecabTokenizer(mecab_dic="unidic")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Tuple = MecabTokenizer(do_lower_case=lowerCAmelCase__ , mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _SCREAMING_SNAKE_CASE ( self : int):
try:
SCREAMING_SNAKE_CASE_: int = MecabTokenizer(
do_lower_case=lowerCAmelCase__ , normalize_text=lowerCAmelCase__ , mecab_option="-d /usr/local/lib/mecab/dic/jumandic")
except RuntimeError:
            # if the dictionary is not installed on the system, the line above raises this error.
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Dict = MecabTokenizer(normalize_text=lowerCAmelCase__ , mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )
@require_sudachi
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_: Any = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi")
self.assertIsNotNone(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = "こんにちは、世界。\nこんばんは、世界。"
SCREAMING_SNAKE_CASE_: Any = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
SCREAMING_SNAKE_CASE_: Tuple = os.path.join(self.tmpdirname , "tokenizer.bin")
with open(lowerCAmelCase__ , "wb") as handle:
pickle.dump(lowerCAmelCase__ , lowerCAmelCase__)
with open(lowerCAmelCase__ , "rb") as handle:
SCREAMING_SNAKE_CASE_: str = pickle.load(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = tokenizer_new.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
@require_sudachi
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Optional[int] = SudachiTokenizer(sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Union[str, Any] = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A")
self.assertListEqual(tokenizer.tokenize("外国人参政権") , ["外国", "人", "参政", "権"])
@require_sudachi
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Tuple = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B")
self.assertListEqual(tokenizer.tokenize("外国人参政権") , ["外国人", "参政権"])
@require_sudachi
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C")
self.assertListEqual(tokenizer.tokenize("外国人参政権") , ["外国人参政権"])
@require_sudachi
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_: Optional[Any] = SudachiTokenizer(do_lower_case=lowerCAmelCase__ , sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: str = SudachiTokenizer(normalize_text=lowerCAmelCase__ , sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )
@require_sudachi
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Optional[Any] = SudachiTokenizer(trim_whitespace=lowerCAmelCase__ , sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
@require_jumanpp
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Tuple = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp")
self.assertIsNotNone(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = "こんにちは、世界。\nこんばんは、世界。"
SCREAMING_SNAKE_CASE_: Dict = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
SCREAMING_SNAKE_CASE_: Optional[int] = os.path.join(self.tmpdirname , "tokenizer.bin")
with open(lowerCAmelCase__ , "wb") as handle:
pickle.dump(lowerCAmelCase__ , lowerCAmelCase__)
with open(lowerCAmelCase__ , "rb") as handle:
SCREAMING_SNAKE_CASE_: Dict = pickle.load(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = tokenizer_new.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
@require_jumanpp
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Union[str, Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: List[str] = JumanppTokenizer(do_lower_case=lowerCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Dict = JumanppTokenizer(normalize_text=lowerCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_: str = JumanppTokenizer(trim_whitespace=lowerCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )
@require_jumanpp
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。") , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Optional[Any] = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
SCREAMING_SNAKE_CASE_: Optional[int] = {}
for i, token in enumerate(lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: Optional[Any] = i
SCREAMING_SNAKE_CASE_: Union[str, Any] = WordpieceTokenizer(vocab=lowerCAmelCase__ , unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("") , [])
self.assertListEqual(tokenizer.tokenize("こんにちは") , ["こんにちは"])
self.assertListEqual(tokenizer.tokenize("こんばんは") , ["こん", "##ばんは"])
self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは") , ["こん", "##ばんは", "[UNK]", "こんにちは"])
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: str = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.subword_tokenizer
SCREAMING_SNAKE_CASE_: List[Any] = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
self.assertListEqual(lowerCAmelCase__ , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])
SCREAMING_SNAKE_CASE_: str = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
self.assertListEqual(lowerCAmelCase__ , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Dict = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")
SCREAMING_SNAKE_CASE_: Tuple = tokenizer.encode("ありがとう。" , add_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = tokenizer.encode("どういたしまして。" , add_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = BertJapaneseTokenizer
_UpperCAmelCase : List[str] = False
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
super().setUp()
SCREAMING_SNAKE_CASE_: Any = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
SCREAMING_SNAKE_CASE_: int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def _SCREAMING_SNAKE_CASE ( self : List[str] , **lowerCAmelCase__ : Union[str, Any]):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="character" , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Union[str, Any]):
SCREAMING_SNAKE_CASE_: List[Any] = "こんにちは、世界。 \nこんばんは、世界。"
SCREAMING_SNAKE_CASE_: Union[str, Any] = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : Tuple):
pass # TODO add if relevant
def _SCREAMING_SNAKE_CASE ( self : int):
pass # TODO add if relevant
def _SCREAMING_SNAKE_CASE ( self : Any):
pass # TODO add if relevant
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: Dict = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character")
SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
self.assertListEqual(
lowerCAmelCase__ , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12])
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Optional[Any] = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
SCREAMING_SNAKE_CASE_: List[Any] = {}
for i, token in enumerate(lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: Dict = i
SCREAMING_SNAKE_CASE_: Optional[int] = CharacterTokenizer(vocab=lowerCAmelCase__ , unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("") , [])
self.assertListEqual(tokenizer.tokenize("こんにちは") , ["こ", "ん", "に", "ち", "は"])
self.assertListEqual(tokenizer.tokenize("こんにちほ") , ["こ", "ん", "に", "ち", "[UNK]"])
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: str = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")
SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer.encode("ありがとう。" , add_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.encode("どういたしまして。" , add_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: List[str] = "cl-tohoku/bert-base-japanese"
SCREAMING_SNAKE_CASE_: List[str] = AutoTokenizer.from_pretrained(lowerCAmelCase__)
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__)
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = "cl-tohoku/bert-base-japanese"
with self.assertLogs("transformers" , level="WARNING") as cm:
BertTokenizer.from_pretrained(lowerCAmelCase__)
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from."))
SCREAMING_SNAKE_CASE_: Union[str, Any] = "bert-base-cased"
with self.assertLogs("transformers" , level="WARNING") as cm:
BertJapaneseTokenizer.from_pretrained(lowerCAmelCase__)
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from."))
| 13
|
class __lowercase :
"""simple docstring"""
def __init__( self : List[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any]):
SCREAMING_SNAKE_CASE_: List[str] = name
SCREAMING_SNAKE_CASE_: Union[str, Any] = val
def __str__( self : Dict):
return F"{self.__class__.__name__}({self.name}, {self.val})"
def __lt__( self : List[str] , lowerCAmelCase__ : Any):
        return self.val < lowerCAmelCase__.val
class __lowercase :
"""simple docstring"""
def __init__( self : Tuple , lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: str = {}
SCREAMING_SNAKE_CASE_: int = {}
SCREAMING_SNAKE_CASE_: Any = self.build_heap(lowerCAmelCase__)
def __getitem__( self : List[Any] , lowerCAmelCase__ : Dict):
return self.get_value(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Dict):
return (idx - 1) // 2
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Optional[Any]):
return idx * 2 + 1
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Tuple):
return idx * 2 + 2
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Optional[int]):
return self.heap_dict[key]
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Tuple = len(lowerCAmelCase__) - 1
SCREAMING_SNAKE_CASE_: List[str] = self.get_parent_idx(lowerCAmelCase__)
for idx, i in enumerate(lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: Union[str, Any] = idx
SCREAMING_SNAKE_CASE_: str = i.val
for i in range(lowerCAmelCase__ , -1 , -1):
self.sift_down(lowerCAmelCase__ , lowerCAmelCase__)
return array
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str]):
while True:
SCREAMING_SNAKE_CASE_: Optional[Any] = self.get_left_child_idx(lowerCAmelCase__) # noqa: E741
SCREAMING_SNAKE_CASE_: Dict = self.get_right_child_idx(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = idx
if l < len(lowerCAmelCase__) and array[l] < array[idx]:
SCREAMING_SNAKE_CASE_: List[str] = l
if r < len(lowerCAmelCase__) and array[r] < array[smallest]:
SCREAMING_SNAKE_CASE_: str = r
if smallest != idx:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = array[smallest], array[idx]
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
SCREAMING_SNAKE_CASE_: Optional[int] = smallest
else:
break
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : str):
SCREAMING_SNAKE_CASE_: Any = self.get_parent_idx(lowerCAmelCase__)
while p >= 0 and self.heap[p] > self.heap[idx]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = self.heap[idx], self.heap[p]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
SCREAMING_SNAKE_CASE_: Union[str, Any] = p
SCREAMING_SNAKE_CASE_: Optional[int] = self.get_parent_idx(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
return self.heap[0]
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self.heap[-1], self.heap[0]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
SCREAMING_SNAKE_CASE_: int = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap)
return x
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Tuple):
self.heap.append(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = len(self.heap) - 1
SCREAMING_SNAKE_CASE_: List[str] = node.val
self.sift_up(len(self.heap) - 1)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
return len(self.heap) == 0
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int]):
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
SCREAMING_SNAKE_CASE_: Any = new_value
SCREAMING_SNAKE_CASE_: Tuple = new_value
self.sift_up(self.idx_of_element[node])
lowerCAmelCase : int = Node("""R""", -1)
lowerCAmelCase : str = Node("""B""", 6)
lowerCAmelCase : str = Node("""A""", 3)
lowerCAmelCase : List[str] = Node("""X""", 1)
lowerCAmelCase : Union[str, Any] = Node("""E""", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
lowerCAmelCase : Optional[Any] = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("""Min Heap - before decrease key""")
for i in my_min_heap.heap:
print(i)
print("""Min Heap - After decrease key of node [B -> -17]""")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
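# A small sanity-check sketch for the heap invariant maintained above: every
# node must compare <= both of its children. Relies on Node.__lt__ defined earlier.
def is_min_heap(heap):
    for i in range(len(heap)):
        for child in (2 * i + 1, 2 * i + 2):
            if child < len(heap) and heap[child] < heap[i]:
                return False
    return True

# e.g. after the decrease_key call above: is_min_heap(my_min_heap.heap) -> True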
| 13
| 1
|
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self ) -> Tuple:
_a : Tuple = tempfile.mkdtemp()
_a : List[str] = 5
# Realm tok
_a : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_a : Optional[int] = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
os.makedirs(_a , exist_ok=_a )
_a : str = os.path.join(_a , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_a : List[str] = os.path.join(self.tmpdirname , '''realm_block_records''' )
os.makedirs(_a , exist_ok=_a )
def __lowercase ( self ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
def __lowercase ( self ) -> str:
shutil.rmtree(self.tmpdirname )
def __lowercase ( self ) -> List[str]:
_a : int = RealmConfig(num_block_records=self.num_block_records )
return config
def __lowercase ( self ) -> Optional[int]:
_a : Optional[Any] = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
def __lowercase ( self ) -> Dict:
_a : Any = np.array(
[
b'''This is the first record''',
b'''This is the second record''',
b'''This is the third record''',
b'''This is the fourth record''',
b'''This is the fifth record''',
b'''This is a longer longer longer record''',
] , dtype=_a , )
return block_records
def __lowercase ( self ) -> Optional[int]:
_a : List[str] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def __lowercase ( self ) -> Optional[int]:
_a : Dict = self.get_config()
_a : Tuple = self.get_dummy_retriever()
_a : Dict = retriever.tokenizer
_a : Union[str, Any] = np.array([0, 3] , dtype='''long''' )
_a : Any = tokenizer(['''Test question'''] ).input_ids
_a : int = tokenizer(
['''the fourth'''] , add_special_tokens=_a , return_token_type_ids=_a , return_attention_mask=_a , ).input_ids
_a : str = config.reader_seq_len
_a : Any = retriever(
_a , _a , answer_ids=_a , max_length=_a , return_tensors='''np''' )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 1_0) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 1_0) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
def __lowercase ( self ) -> Union[str, Any]:
_a : Optional[int] = self.get_config()
_a : Union[str, Any] = self.get_dummy_retriever()
_a : Tuple = retriever.tokenizer
_a : List[str] = np.array([0, 3, 5] , dtype='''long''' )
_a : int = tokenizer(['''Test question'''] ).input_ids
_a : List[str] = tokenizer(
['''the fourth''', '''longer longer'''] , add_special_tokens=_a , return_token_type_ids=_a , return_attention_mask=_a , ).input_ids
_a : Union[str, Any] = config.reader_seq_len
_a : List[Any] = retriever(
_a , _a , answer_ids=_a , max_length=_a , return_tensors='''np''' )
self.assertEqual([False, True, True] , _a )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , _a )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , _a )
def __lowercase ( self ) -> int:
_a : Any = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
# Test local path
_a : str = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
# Test mocked remote path
with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
_a : Tuple = os.path.join(
os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
_a : str = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
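# Schematic restatement of the concatenation the retriever tests above assert:
# [CLS] question [SEP] block [SEP], using the toy vocab where [CLS]=1 and [SEP]=2.
def concat_question_and_block(question_ids, block_ids, cls_id=1, sep_id=2):
    return [cls_id] + question_ids + [sep_id] + block_ids + [sep_id]

# concat_question_and_block([5, 6], [7, 8, 9]) -> [1, 5, 6, 2, 7, 8, 9, 2]
# i.e. "[CLS] test question [SEP] this is the [SEP]" in the toy vocab above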
| 352
|
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
a__ = logging.get_logger(__name__)
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : str ,__a : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
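# Worked example of the 0-1000 normalization above, restated standalone so it
# runs on its own (the obfuscated function above computes the same thing):
def normalize_box_demo(box, width, height):
    return [
        int(1_000 * (box[0] / width)),
        int(1_000 * (box[1] / height)),
        int(1_000 * (box[2] / width)),
        int(1_000 * (box[3] / height)),
    ]

# normalize_box_demo([100, 200, 150, 220], 1_000, 2_000) -> [100, 100, 150, 110]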
def __UpperCAmelCase ( __a : np.ndarray ,__a : Optional[str] ,__a : Optional[str] ) -> List[Any]:
"""simple docstring"""
_a : str = to_pil_image(__a )
_a , _a : Optional[Any] = pil_image.size
_a : Tuple = pytesseract.image_to_data(__a ,lang=__a ,output_type='''dict''' ,config=__a )
_a , _a , _a , _a , _a : List[str] = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
_a : Dict = [idx for idx, word in enumerate(__a ) if not word.strip()]
_a : str = [word for idx, word in enumerate(__a ) if idx not in irrelevant_indices]
_a : List[str] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
_a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
_a : str = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
_a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
_a : int = []
for x, y, w, h in zip(__a ,__a ,__a ,__a ):
_a : List[str] = [x, y, x + w, y + h]
actual_boxes.append(__a )
# finally, normalize the bounding boxes
_a : Dict = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(__a ,__a ,__a ) )
assert len(__a ) == len(__a ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = ["pixel_values"]
def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = 1 / 2_5_5 , _a = True , _a = None , _a = None , _a = True , _a = None , _a = "" , **_a , ) -> None:
super().__init__(**_a )
_a : List[str] = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_a : Union[str, Any] = get_size_dict(_a )
_a : int = do_resize
_a : Optional[int] = size
_a : str = resample
_a : str = do_rescale
_a : Any = rescale_value
_a : Optional[Any] = do_normalize
_a : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
_a : List[Any] = apply_ocr
_a : Optional[int] = ocr_lang
_a : Tuple = tesseract_config
def __lowercase ( self , _a , _a , _a = PILImageResampling.BILINEAR , _a = None , **_a , ) -> np.ndarray:
_a : Any = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_a : Optional[int] = (size['''height'''], size['''width'''])
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a = None , **_a , ) -> np.ndarray:
return rescale(_a , scale=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray:
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a = None , _a = None , _a=None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image:
_a : Optional[int] = do_resize if do_resize is not None else self.do_resize
_a : Union[str, Any] = size if size is not None else self.size
_a : Any = get_size_dict(_a )
_a : List[str] = resample if resample is not None else self.resample
_a : int = do_rescale if do_rescale is not None else self.do_rescale
_a : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_a : int = do_normalize if do_normalize is not None else self.do_normalize
_a : str = image_mean if image_mean is not None else self.image_mean
_a : Tuple = image_std if image_std is not None else self.image_std
_a : Any = apply_ocr if apply_ocr is not None else self.apply_ocr
_a : int = ocr_lang if ocr_lang is not None else self.ocr_lang
_a : Optional[int] = tesseract_config if tesseract_config is not None else self.tesseract_config
_a : List[Any] = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
_a : Any = [to_numpy_array(_a ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
_a : str = []
_a : str = []
for image in images:
_a , _a : Union[str, Any] = apply_tesseract(_a , _a , _a )
words_batch.append(_a )
boxes_batch.append(_a )
if do_resize:
_a : List[str] = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_rescale:
_a : Optional[Any] = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
_a : List[Any] = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
_a : List[str] = [to_channel_dimension_format(_a , _a ) for image in images]
_a : List[str] = BatchFeature(data={'''pixel_values''': images} , tensor_type=_a )
if apply_ocr:
_a : Optional[int] = words_batch
_a : List[Any] = boxes_batch
return data
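# Minimal numeric sketch of the rescale -> normalize pipeline the processor
# above applies: 1/255 scaling followed by per-channel mean/std. The mean/std
# values in the usage note below are illustrative.
import numpy as np

def rescale_and_normalize(image, mean, std, scale=1 / 255):
    image = image.astype(np.float32) * scale
    mean = np.array(mean, dtype=np.float32)[:, None, None]
    std = np.array(std, dtype=np.float32)[:, None, None]
    return (image - mean) / std

# a pixel value of 127 with mean 0.5 and std 0.5 -> (127/255 - 0.5) / 0.5 ≈ -0.004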
| 15
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = ["pixel_values"]
def __init__( self : Dict , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 255 , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = True , **lowercase_ : Any , ):
'''simple docstring'''
super().__init__(**lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = size if size is not None else {'''shortest_edge''': 224}
SCREAMING_SNAKE_CASE_ : List[str] = get_size_dict(lowercase_ , default_to_square=lowercase_)
SCREAMING_SNAKE_CASE_ : str = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
SCREAMING_SNAKE_CASE_ : List[Any] = get_size_dict(lowercase_ , default_to_square=lowercase_ , param_name='''crop_size''')
SCREAMING_SNAKE_CASE_ : List[Any] = do_resize
SCREAMING_SNAKE_CASE_ : Tuple = size
SCREAMING_SNAKE_CASE_ : str = resample
SCREAMING_SNAKE_CASE_ : Any = do_center_crop
SCREAMING_SNAKE_CASE_ : Any = crop_size
SCREAMING_SNAKE_CASE_ : Dict = do_rescale
SCREAMING_SNAKE_CASE_ : Any = rescale_factor
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE_ : int = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
SCREAMING_SNAKE_CASE_ : str = image_std if image_std is not None else OPENAI_CLIP_STD
SCREAMING_SNAKE_CASE_ : str = do_convert_rgb
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_size_dict(lowercase_ , default_to_square=lowercase_)
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_resize_output_image_size(lowercase_ , size=size['''shortest_edge'''] , default_to_square=lowercase_)
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Dict , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_size_dict(lowercase_)
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}')
return center_crop(lowercase_ , size=(size['''height'''], size['''width''']) , data_format=lowercase_ , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[Any] , ):
'''simple docstring'''
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Dict , ):
'''simple docstring'''
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : int = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **lowercase_ : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ : int = size if size is not None else self.size
SCREAMING_SNAKE_CASE_ : Any = get_size_dict(lowercase_ , param_name='''size''' , default_to_square=lowercase_)
SCREAMING_SNAKE_CASE_ : int = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_ : List[str] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_size_dict(lowercase_ , param_name='''crop_size''' , default_to_square=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ : Optional[int] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_ : List[str] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_ : str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
SCREAMING_SNAKE_CASE_ : Optional[int] = make_list_of_images(lowercase_)
if not valid_images(lowercase_):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''')
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
# PIL RGBA images are converted to RGB
if do_convert_rgb:
SCREAMING_SNAKE_CASE_ : int = [convert_to_rgb(lowercase_) for image in images]
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ : Tuple = [to_numpy_array(lowercase_) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE_ : str = [self.center_crop(image=lowercase_ , size=lowercase_) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.rescale(image=lowercase_ , scale=lowercase_) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_ : Dict = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) for image in images]
SCREAMING_SNAKE_CASE_ : List[str] = [to_channel_dimension_format(lowercase_ , lowercase_) for image in images]
SCREAMING_SNAKE_CASE_ : Tuple = {'''pixel_values''': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
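# Standalone restatement of the "shortest_edge" resize rule used above: scale so
# the shorter side hits the target while preserving aspect ratio; the 224x224
# center crop is applied afterwards.
def shortest_edge_size(height, width, shortest_edge=224):
    short, long = min(height, width), max(height, width)
    new_long = int(shortest_edge * long / short)
    return (shortest_edge, new_long) if height <= width else (new_long, shortest_edge)

# shortest_edge_size(480, 640) -> (224, 298)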
| 91
|
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def lowerCAmelCase_ (lowerCAmelCase__: List[Any] , lowerCAmelCase__: List[str] , lowerCAmelCase__: Optional[Any]=[] ):
"""simple docstring"""
UpperCAmelCase_: Union[str, Any] = size[0] - overlap_pixels * 2
UpperCAmelCase_: Dict = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
UpperCAmelCase_: Union[str, Any] = np.ones((size_y, size_x) , dtype=np.uinta ) * 2_5_5
UpperCAmelCase_: Optional[int] = np.pad(lowerCAmelCase__ , mode="""linear_ramp""" , pad_width=lowerCAmelCase__ , end_values=0 )
if "l" in remove_borders:
UpperCAmelCase_: List[Any] = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
UpperCAmelCase_: Optional[Any] = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
UpperCAmelCase_: Optional[int] = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
UpperCAmelCase_: int = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def lowerCAmelCase_ (lowerCAmelCase__: List[Any] , lowerCAmelCase__: str , lowerCAmelCase__: Union[str, Any] ):
"""simple docstring"""
return max(lowerCAmelCase__ , min(lowerCAmelCase__ , lowerCAmelCase__ ) )
def lowerCAmelCase_ (lowerCAmelCase__: [int] , lowerCAmelCase__: [int] , lowerCAmelCase__: [int] ):
"""simple docstring"""
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def lowerCAmelCase_ (lowerCAmelCase__: [int] , lowerCAmelCase__: int , lowerCAmelCase__: [int] ):
"""simple docstring"""
UpperCAmelCase_: str = list(lowerCAmelCase__ )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
UpperCAmelCase_: int = clamp_rect(lowerCAmelCase__ , [0, 0] , [image_size[0], image_size[1]] )
return rect
def lowerCAmelCase_ (lowerCAmelCase__: List[Any] , lowerCAmelCase__: List[str] , lowerCAmelCase__: List[Any] , lowerCAmelCase__: int ):
"""simple docstring"""
UpperCAmelCase_: Optional[Any] = Image.new("""RGB""" , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(lowerCAmelCase__ , (original_slice, 0) )
return result
def lowerCAmelCase_ (lowerCAmelCase__: Dict , lowerCAmelCase__: Dict ):
"""simple docstring"""
UpperCAmelCase_: Dict = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
UpperCAmelCase_: Optional[int] = tile.crop(lowerCAmelCase__ )
return tile
def lowerCAmelCase_ (n: int , d: int ):
    """simple docstring"""
    UpperCAmelCase_: int = n % d
    return n - UpperCAmelCase_
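# Worked example of the helper above: it rounds n down to the nearest multiple
# of d (e.g. 1000 % 128 == 104, so 1000 -> 896). Standalone restatement:
def round_down_to_multiple(n, d):
    return n - (n % d)

# round_down_to_multiple(1_000, 128) -> 896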
class _a ( _lowerCAmelCase ):
def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = 350, ) -> str:
super().__init__(
vae=SCREAMING_SNAKE_CASE_, text_encoder=SCREAMING_SNAKE_CASE_, tokenizer=SCREAMING_SNAKE_CASE_, unet=SCREAMING_SNAKE_CASE_, low_res_scheduler=SCREAMING_SNAKE_CASE_, scheduler=SCREAMING_SNAKE_CASE_, max_noise_level=SCREAMING_SNAKE_CASE_, )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Dict:
torch.manual_seed(0 )
UpperCAmelCase_: Dict = (
min(image.size[0] - (tile_size + original_image_slice), x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice), y * tile_size ),
min(image.size[0], (x + 1) * tile_size ),
min(image.size[1], (y + 1) * tile_size ),
)
UpperCAmelCase_: Tuple = add_overlap_rect(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, image.size )
UpperCAmelCase_: List[str] = image.crop(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
UpperCAmelCase_: List[Any] = translated_slice_x - (original_image_slice / 2)
UpperCAmelCase_: str = max(0, SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Any = squeeze_tile(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Any = to_input.size
UpperCAmelCase_: Any = to_input.resize((tile_size, tile_size), Image.BICUBIC )
UpperCAmelCase_: str = super(SCREAMING_SNAKE_CASE_, self ).__call__(image=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ).images[0]
UpperCAmelCase_: Optional[int] = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC )
UpperCAmelCase_: int = unsqueeze_tile(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC )
UpperCAmelCase_: Union[str, Any] = []
if x == 0:
remove_borders.append("""l""" )
elif crop_rect[2] == image.size[0]:
remove_borders.append("""r""" )
if y == 0:
remove_borders.append("""t""" )
elif crop_rect[3] == image.size[1]:
remove_borders.append("""b""" )
UpperCAmelCase_: Tuple = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=SCREAMING_SNAKE_CASE_ ), mode="""L""", )
final_image.paste(
SCREAMING_SNAKE_CASE_, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def __call__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = 75, SCREAMING_SNAKE_CASE_ = 9.0, SCREAMING_SNAKE_CASE_ = 50, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = 1, SCREAMING_SNAKE_CASE_ = 0.0, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = 1, SCREAMING_SNAKE_CASE_ = 128, SCREAMING_SNAKE_CASE_ = 32, SCREAMING_SNAKE_CASE_ = 32, ) -> Dict:
UpperCAmelCase_: int = Image.new("""RGB""", (image.size[0] * 4, image.size[1] * 4) )
UpperCAmelCase_: str = math.ceil(image.size[0] / tile_size )
UpperCAmelCase_: int = math.ceil(image.size[1] / tile_size )
UpperCAmelCase_: Dict = tcx * tcy
UpperCAmelCase_: Optional[Any] = 0
for y in range(SCREAMING_SNAKE_CASE_ ):
for x in range(SCREAMING_SNAKE_CASE_ ):
self._process_tile(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, prompt=SCREAMING_SNAKE_CASE_, num_inference_steps=SCREAMING_SNAKE_CASE_, guidance_scale=SCREAMING_SNAKE_CASE_, noise_level=SCREAMING_SNAKE_CASE_, negative_prompt=SCREAMING_SNAKE_CASE_, num_images_per_prompt=SCREAMING_SNAKE_CASE_, eta=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, latents=SCREAMING_SNAKE_CASE_, )
current_count += 1
if callback is not None:
callback({"""progress""": current_count / total_tile_count, """image""": final_image} )
return final_image
def lowerCAmelCase_ ():
"""simple docstring"""
UpperCAmelCase_: Tuple = """stabilityai/stable-diffusion-x4-upscaler"""
UpperCAmelCase_: Union[str, Any] = StableDiffusionTiledUpscalePipeline.from_pretrained(lowerCAmelCase__ , revision="""fp16""" , torch_dtype=torch.floataa )
UpperCAmelCase_: str = pipe.to("""cuda""" )
UpperCAmelCase_: List[str] = Image.open("""../../docs/source/imgs/diffusers_library.jpg""" )
def callback(lowerCAmelCase__: Dict ):
print(F'progress: {obj["progress"]:.4f}' )
obj["image"].save("""diffusers_library_progress.jpg""" )
UpperCAmelCase_: Optional[int] = pipe(image=lowerCAmelCase__ , prompt="""Black font, white background, vector""" , noise_level=4_0 , callback=lowerCAmelCase__ )
final_image.save("""diffusers_library.jpg""" )
if __name__ == "__main__":
main()
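# Hedged sketch of the tile-grid bookkeeping the pipeline above performs: split
# the image into ceil(w/t) x ceil(h/t) tiles and report fractional progress,
# mirroring the nested loop in __call__ (names here are illustrative).
import math

def iter_tiles(width, height, tile_size=128):
    tcx, tcy = math.ceil(width / tile_size), math.ceil(height / tile_size)
    total = tcx * tcy
    count = 0
    for y in range(tcy):
        for x in range(tcx):
            count += 1
            yield x, y, count / total  # tile coordinates plus progress fraction

# list(iter_tiles(256, 128)) -> [(0, 0, 0.5), (1, 0, 1.0)]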
| 147
| 0
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def snake_case_(_UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
_snake_case = filter(lambda _UpperCamelCase : p.requires_grad , model.parameters() )
_snake_case = sum([np.prod(p.size() ) for p in model_parameters] )
return params
__A = logging.getLogger(__name__)
def snake_case_(_UpperCamelCase , _UpperCamelCase ) -> List[str]:
"""simple docstring"""
if metric == "rouge2":
_snake_case = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
_snake_case = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
_snake_case = '''{val_avg_em:.4f}-{step_count}'''
elif metric == "loss":
_snake_case = '''{val_avg_loss:.4f}-{step_count}'''
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
''' function.''' )
_snake_case = ModelCheckpoint(
dirpath=_UpperCamelCase , filename=_UpperCamelCase , monitor=F"""val_{metric}""" , mode='''max''' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def snake_case_(_UpperCamelCase , _UpperCamelCase ) -> Tuple:
"""simple docstring"""
return EarlyStopping(
monitor=F"""val_{metric}""" , mode='''min''' if '''loss''' in metric else '''max''' , patience=_UpperCamelCase , verbose=_UpperCamelCase , )
class lowercase_ ( pl.Callback ):
def UpperCamelCase_ ( self : Optional[Any] , A__ : Optional[Any] , A__ : Optional[int] ) -> List[str]:
_snake_case = {f"""lr_group_{i}""": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(A__ )
@rank_zero_only
def UpperCamelCase_ ( self : Optional[int] , A__ : pl.Trainer , A__ : pl.LightningModule , A__ : str , A__ : List[str]=True ) -> None:
logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
_snake_case = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
_snake_case = Path(pl_module.hparams.output_dir )
if type_path == "test":
_snake_case = od / '''test_results.txt'''
_snake_case = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_snake_case = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
_snake_case = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=A__ )
generations_file.parent.mkdir(exist_ok=A__ )
with open(A__ , '''a+''' ) as writer:
for key in sorted(A__ ):
if key in ["log", "progress_bar", "preds"]:
continue
_snake_case = metrics[key]
if isinstance(A__ , torch.Tensor ):
_snake_case = val.item()
_snake_case = f"""{key}: {val:.6f}\n"""
writer.write(A__ )
if not save_generations:
return
if "preds" in metrics:
_snake_case = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(A__ )
@rank_zero_only
def UpperCamelCase_ ( self : Tuple , A__ : Any , A__ : List[str] ) -> Any:
try:
_snake_case = pl_module.model.model.num_parameters()
except AttributeError:
_snake_case = pl_module.model.num_parameters()
_snake_case = count_trainable_parameters(A__ )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} )
@rank_zero_only
def UpperCamelCase_ ( self : List[str] , A__ : pl.Trainer , A__ : pl.LightningModule ) -> Union[str, Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(A__ , A__ , '''test''' )
@rank_zero_only
def UpperCamelCase_ ( self : Union[str, Any] , A__ : pl.Trainer , A__ : Optional[Any] ) -> Union[str, Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 355
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
__A = logging.get_logger(__name__)
__A = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__A = {
'''vocab_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
},
'''merges_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
},
}
__A = {'''allegro/herbert-base-cased''': 5_14}
__A = {}
class lowercase_ ( __lowercase ):
UpperCamelCase_ : Any = VOCAB_FILES_NAMES
UpperCamelCase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : List[str] = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : Optional[Any] = HerbertTokenizer
def __init__( self : Tuple , A__ : str=None , A__ : Optional[Any]=None , A__ : Union[str, Any]=None , A__ : Optional[int]="<s>" , A__ : Optional[int]="<unk>" , A__ : str="<pad>" , A__ : List[Any]="<mask>" , A__ : Dict="</s>" , **A__ : Optional[int] , ) -> Optional[int]:
super().__init__(
A__ , A__ , tokenizer_file=A__ , cls_token=A__ , unk_token=A__ , pad_token=A__ , mask_token=A__ , sep_token=A__ , **A__ , )
def UpperCamelCase_ ( self : str , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
_snake_case = [self.cls_token_id]
_snake_case = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase_ ( self : Tuple , A__ : List[int] , A__ : Optional[List[int]] = None , A__ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A__ , token_ids_a=A__ , already_has_special_tokens=A__ )
if token_ids_a is None:
return [1] + ([0] * len(A__ )) + [1]
return [1] + ([0] * len(A__ )) + [1] + ([0] * len(A__ )) + [1]
def UpperCamelCase_ ( self : Any , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : Union[str, Any] , A__ : str , A__ : Optional[str] = None ) -> Tuple[str]:
_snake_case = self._tokenizer.model.save(A__ , name=A__ )
return tuple(A__ )
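# Worked example of the BERT-style segment-id scheme above: segment 0 covers
# [CLS] A [SEP], segment 1 covers B [SEP]. Sequence lengths below are illustrative.
def demo_token_type_ids(len_a, len_b=None):
    segment_a = (1 + len_a + 1) * [0]  # [CLS] + tokens of A + [SEP]
    if len_b is None:
        return segment_a
    return segment_a + (len_b + 1) * [1]  # tokens of B + [SEP]

# demo_token_type_ids(3, 2) -> [0, 0, 0, 0, 0, 1, 1, 1]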
| 278
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_squeezebert_fast'] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_squeezebert'] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
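# Note: with the `_LazyModule` pattern above, submodules are imported only on first
# attribute access, so e.g. (sketch)
# from transformers.models.squeezebert import SqueezeBertConfig
# resolves lazily and does not pay the torch import cost until a torch class is touched.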
| 1
|
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed( TransformedDistribution ):
    def __init__(self , base_distribution : Distribution , loc=None , scale=None , event_dim=0 ):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim )] )
    @property
    def mean(self ):
        return self.base_dist.mean * self.scale + self.loc
    @property
    def variance(self ):
        return self.base_dist.variance * self.scale**2
    @property
    def stddev(self ):
        return self.variance.sqrt()
class ParameterProjection( nn.Module ):
    def __init__(self , in_features : int , args_dim : Dict[str, int] , domain_map : Callable[..., Tuple[torch.Tensor]] , **kwargs ):
        super().__init__(**kwargs )
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features , dim ) for dim in args_dim.values()] )
        self.domain_map = domain_map
    def forward(self , x : torch.Tensor ):
        params_unbounded = [proj(x ) for proj in self.proj]
        return self.domain_map(*params_unbounded )
class LambdaLayer( nn.Module ):
    def __init__(self , function ):
        super().__init__()
        self.function = function
    def forward(self , x , *args ):
        return self.function(x , *args )
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]
    def __init__(self , dim : int = 1 ):
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}
    def _base_distribution(self , distr_args ):
        if self.dim == 1:
            return self.distribution_class(*distr_args )
        else:
            return Independent(self.distribution_class(*distr_args ) , 1 )
    def distribution(self , distr_args , loc : Optional[torch.Tensor] = None , scale : Optional[torch.Tensor] = None , ):
        distr = self._base_distribution(distr_args )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr , loc=loc , scale=scale , event_dim=self.event_dim )
    @property
    def event_shape(self ):
        return () if self.dim == 1 else (self.dim,)
    @property
    def event_dim(self ):
        return len(self.event_shape )
    @property
    def value_in_support(self ):
        return 0.0
    def get_parameter_projection(self , in_features : int ):
        return ParameterProjection(
            in_features=in_features , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
    def domain_map(self , *args : torch.Tensor ):
        raise NotImplementedError()
    @staticmethod
    def squareplus(x : torch.Tensor ):
        return (x + torch.sqrt(torch.square(x ) + 4.0 )) / 2.0
class StudentTOutput( DistributionOutput ):
a__ : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
a__ : type = StudentT
@classmethod
def _lowercase (cls : Union[str, Any] , __a : torch.Tensor , __a : torch.Tensor , __a : torch.Tensor ):
UpperCAmelCase_ = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps )
UpperCAmelCase_ = 2.0 + cls.squareplus(__a )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class NormalOutput( DistributionOutput ):
a__ : Dict[str, int] = {"loc": 1, "scale": 1}
a__ : type = Normal
@classmethod
def _lowercase (cls : Tuple , __a : torch.Tensor , __a : torch.Tensor ):
UpperCAmelCase_ = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class NegativeBinomialOutput( DistributionOutput ):
a__ : Dict[str, int] = {"total_count": 1, "logits": 1}
a__ : type = NegativeBinomial
@classmethod
def _lowercase (cls : Optional[Any] , __a : torch.Tensor , __a : torch.Tensor ):
UpperCAmelCase_ = cls.squareplus(__a )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def _lowercase (self : List[str] , __a : str ):
UpperCAmelCase_ , UpperCAmelCase_ = distr_args
if self.dim == 1:
return self.distribution_class(total_count=__a , logits=__a )
else:
return Independent(self.distribution_class(total_count=__a , logits=__a ) , 1 )
def _lowercase (self : Optional[Any] , __a : int , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None ):
UpperCAmelCase_ , UpperCAmelCase_ = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
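# Usage sketch (shapes illustrative): project features to distribution parameters,
# then build a distribution and sample from it.
# output = StudentTOutput(dim=1)
# proj = output.get_parameter_projection(in_features=32)
# distr_args = proj(torch.randn(8, 32))   # tuple (df, loc, scale), each of shape (8,)
# distr = output.distribution(distr_args) # a StudentT distribution
# sample = distr.sample()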
| 1
| 1
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        encoding.update(encoding_image_processor )
        return encoding
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
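# Usage sketch (the checkpoint name is an assumption; any ViLT checkpoint with a
# paired tokenizer behaves the same):
# processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
# encoding = processor(images=image, text="How many cats are there?", return_tensors="pt")
# `encoding` merges tokenizer outputs (input_ids, attention_mask) with image-processor
# outputs (pixel_values, pixel_mask).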
| 80
|
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
A_ : Any =logging.get_logger(__name__)
A_ : Dict =Dict[str, Any]
A_ : Dict =List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ObjectDetectionPipeline( Pipeline ):
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        if self.framework == "tf":
            raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
requires_backends(self , 'vision' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
    def _sanitize_parameters( self , **kwargs ):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs['threshold'] = kwargs['threshold']
        return {}, {}, postprocess_kwargs
    def __call__( self , *args , **kwargs ):
        return super().__call__(*args , **kwargs )
    def preprocess( self , image ):
        image = load_image(image )
        target_size = torch.IntTensor([[image.height, image.width]] )
        inputs = self.image_processor(images=[image] , return_tensors='pt' )
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt' )
        inputs['target_size'] = target_size
        return inputs
    def _forward( self , model_inputs ):
        target_size = model_inputs.pop('target_size' )
        outputs = self.model(**model_inputs )
        model_outputs = outputs.__class__({'target_size': target_size, **outputs} )
        if self.tokenizer is not None:
            model_outputs['bbox'] = model_inputs['bbox']
        return model_outputs
    def postprocess( self , model_outputs , threshold=0.9 ):
        target_size = model_outputs['target_size']
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height , width = target_size[0].tolist()
            def unnormalize(bbox ):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ] ) )
            scores , classes = model_outputs['logits'].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox ) for bbox in model_outputs['bbox'].squeeze(0 )]
            keys = ['score', 'label', 'box']
            annotation = [dict(zip(keys , vals ) ) for vals in zip(scores.tolist() , labels , boxes ) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size )
            raw_annotation = raw_annotations[0]
            scores = raw_annotation['scores']
            labels = raw_annotation['labels']
            boxes = raw_annotation['boxes']
            raw_annotation['scores'] = scores.tolist()
            raw_annotation['labels'] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation['boxes'] = [self._get_bounding_box(box ) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ['score', 'label', 'box']
            annotation = [
                dict(zip(keys , vals ) )
                for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes'] )
            ]
        return annotation
    def _get_bounding_box( self , box ):
        if self.framework != "pt":
            raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.' )
        xmin , ymin , xmax , ymax = box.int().tolist()
        bbox = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
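# Usage sketch via the high-level factory (checkpoint name is an assumption):
# from transformers import pipeline
# detector = pipeline("object-detection", model="facebook/detr-resnet-50")
# detector("http://images.cocodataset.org/val2017/000000039769.jpg")
# -> [{"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]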
| 80
| 1
|
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
    os.environ['XLA_PYTHON_CLIENT_MEM_FRACTION'] = '0.12'  # assumed parallelism: 8
@require_flax
@is_staging_test
class lowercase__( unittest.TestCase ):
"""simple docstring"""
@classmethod
    def setUpClass ( cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
    def tearDownClass ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
    def test_push_to_hub ( self ):
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        model = FlaxBertModel(config )
        model.push_to_hub('test-model-flax' , use_auth_token=self._token )
        new_model = FlaxBertModel.from_pretrained(f'{USER}/test-model-flax' )
        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1e-3 , msg=f'{key} not identical' )
        # Reset repo
        delete_repo(token=self._token , repo_id='test-model-flax' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir , repo_id='test-model-flax' , push_to_hub=True , use_auth_token=self._token )
        new_model = FlaxBertModel.from_pretrained(f'{USER}/test-model-flax' )
        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1e-3 , msg=f'{key} not identical' )
    def test_push_to_hub_in_organization ( self ):
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        model = FlaxBertModel(config )
        model.push_to_hub('valid_org/test-model-flax-org' , use_auth_token=self._token )
        new_model = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' )
        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1e-3 , msg=f'{key} not identical' )
        # Reset repo
        delete_repo(token=self._token , repo_id='valid_org/test-model-flax-org' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir , repo_id='valid_org/test-model-flax-org' , push_to_hub=True , use_auth_token=self._token )
        new_model = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' )
        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1e-3 , msg=f'{key} not identical' )
def check_models_equal( model_a , model_b ):
    models_are_equal = True
    flat_params_a = flatten_dict(model_a.params )
    flat_params_b = flatten_dict(model_b.params )
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1e-4:
            models_are_equal = False
    return models_are_equal
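# Usage sketch: compare a freshly built model against a reloaded copy.
# model_a = FlaxBertModel(config)
# model_a.save_pretrained(tmp_dir)
# model_b = FlaxBertModel.from_pretrained(tmp_dir)
# assert check_models_equal(model_a, model_b)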
@require_flax
class lowercase__( unittest.TestCase ):
"""simple docstring"""
    def test_model_from_pretrained_subfolder ( self ):
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
        model = FlaxBertModel(config )
        subfolder = 'bert'
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir , subfolder ) )
            with self.assertRaises(OSError ):
                FlaxBertModel.from_pretrained(tmp_dir )
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir , subfolder=subfolder )
        self.assertTrue(check_models_equal(model , model_loaded ) )
    def test_model_from_pretrained_subfolder_sharded ( self ):
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
        model = FlaxBertModel(config )
        subfolder = 'bert'
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir , subfolder ) , max_shard_size='10KB' )
            with self.assertRaises(OSError ):
                FlaxBertModel.from_pretrained(tmp_dir )
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir , subfolder=subfolder )
        self.assertTrue(check_models_equal(model , model_loaded ) )
    def test_model_from_pretrained_hub_subfolder ( self ):
        subfolder = 'bert'
        model_id = 'hf-internal-testing/tiny-random-bert-subfolder'
        with self.assertRaises(OSError ):
            FlaxBertModel.from_pretrained(model_id )
        model = FlaxBertModel.from_pretrained(model_id , subfolder=subfolder )
        self.assertIsNotNone(model )
    def test_model_from_pretrained_hub_subfolder_sharded ( self ):
        subfolder = 'bert'
        model_id = 'hf-internal-testing/tiny-random-bert-sharded-subfolder'
        with self.assertRaises(OSError ):
            FlaxBertModel.from_pretrained(model_id )
        model = FlaxBertModel.from_pretrained(model_id , subfolder=subfolder )
        self.assertIsNotNone(model )
| 30
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert'] = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert_fast'] = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_rembert'] = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_rembert'] = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=10 , num_channels=3 , min_size=32 * 4 , max_size=32 * 6 , num_labels=4 , mask_feature_size=32 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
def snake_case__ ( self : str ) -> str:
'''simple docstring'''
_UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCAmelCase__ )
_UpperCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCAmelCase__ )
_UpperCamelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCAmelCase__ ) > 0.5
).float()
_UpperCamelCase = (torch.rand((self.batch_size, self.num_labels) , device=lowerCAmelCase__ ) > 0.5).long()
_UpperCamelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def snake_case__ ( self : Tuple ) -> Dict:
'''simple docstring'''
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def snake_case__ ( self : str ) -> int:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def snake_case__ ( self : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = output.encoder_hidden_states
_UpperCamelCase = output.pixel_decoder_hidden_states
_UpperCamelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCAmelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCAmelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCAmelCase__ ) , config.decoder_config.decoder_layers )
def snake_case__ ( self : str , lowerCAmelCase__ : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any]=False ) -> Dict:
'''simple docstring'''
with torch.no_grad():
_UpperCamelCase = MaskFormerModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCamelCase = model(pixel_values=lowerCAmelCase__ , pixel_mask=lowerCAmelCase__ )
_UpperCamelCase = model(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : int , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Any ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = MaskFormerForInstanceSegmentation(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
def comm_check_on_output(lowerCAmelCase__ : Any ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_UpperCamelCase = model(pixel_values=lowerCAmelCase__ , pixel_mask=lowerCAmelCase__ )
_UpperCamelCase = model(lowerCAmelCase__ )
comm_check_on_output(lowerCAmelCase__ )
_UpperCamelCase = model(
pixel_values=lowerCAmelCase__ , pixel_mask=lowerCAmelCase__ , mask_labels=lowerCAmelCase__ , class_labels=lowerCAmelCase__ )
comm_check_on_output(lowerCAmelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskFormerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
def snake_case__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = MaskFormerModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def snake_case__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCAmelCase__ , **lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
def snake_case__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*lowerCAmelCase__ )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def snake_case__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def snake_case__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def snake_case__ ( self : str ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def snake_case__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
pass
def snake_case__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowerCAmelCase__ )
_UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
@slow
def snake_case__ ( self : Any ) -> List[Any]:
'''simple docstring'''
for model_name in ["facebook/maskformer-swin-small-coco"]:
_UpperCamelCase = MaskFormerModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def snake_case__ ( self : str ) -> str:
'''simple docstring'''
_UpperCamelCase = (self.model_tester.min_size,) * 2
_UpperCamelCase = {
'''pixel_values''': torch.randn((2, 3, *size) , device=lowerCAmelCase__ ),
'''mask_labels''': torch.randn((2, 10, *size) , device=lowerCAmelCase__ ),
'''class_labels''': torch.zeros(2 , 10 , device=lowerCAmelCase__ ).long(),
}
_UpperCamelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(lowerCAmelCase__ )
_UpperCamelCase = model(**lowerCAmelCase__ )
self.assertTrue(outputs.loss is not None )
def snake_case__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCAmelCase__ , **lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
def snake_case__ ( self : Tuple ) -> Any:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowerCAmelCase__ ).to(lowerCAmelCase__ )
_UpperCamelCase = model(**lowerCAmelCase__ , output_attentions=lowerCAmelCase__ )
self.assertTrue(outputs.attentions is not None )
def snake_case__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_UpperCamelCase = self.all_model_classes[1]
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
_UpperCamelCase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
_UpperCamelCase = model(lowerCAmelCase__ , mask_labels=lowerCAmelCase__ , class_labels=lowerCAmelCase__ ).loss
loss.backward()
def snake_case__ ( self : Optional[int] ) -> str:
'''simple docstring'''
_UpperCamelCase = self.all_model_classes[1]
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
_UpperCamelCase = True
_UpperCamelCase = True
_UpperCamelCase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
_UpperCamelCase = model(lowerCAmelCase__ , mask_labels=lowerCAmelCase__ , class_labels=lowerCAmelCase__ )
_UpperCamelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCamelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
_UpperCamelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCamelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCAmelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowercase__ : Any = 1E-4
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_vision
@slow
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def snake_case__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(lowerCAmelCase__ )
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ )
_UpperCamelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCAmelCase__ , (1, 3, 800, 1088) )
with torch.no_grad():
_UpperCamelCase = model(**lowerCAmelCase__ )
_UpperCamelCase = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(lowerCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
_UpperCamelCase = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(lowerCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
_UpperCamelCase = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(lowerCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def snake_case__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(lowerCAmelCase__ )
.eval()
)
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ )
_UpperCamelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCAmelCase__ , (1, 3, 800, 1088) )
with torch.no_grad():
_UpperCamelCase = model(**lowerCAmelCase__ )
# masks_queries_logits
_UpperCamelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_UpperCamelCase = [
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
]
_UpperCamelCase = torch.tensor(lowerCAmelCase__ ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
# class_queries_logits
_UpperCamelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_UpperCamelCase = torch.tensor(
[
[1.6_5_1_2e0_0, -5.2_5_7_2e0_0, -3.3_5_1_9e0_0],
[3.6_1_6_9e-0_2, -5.9_0_2_5e0_0, -2.9_3_1_3e0_0],
[1.0_7_6_6e-0_4, -7.7_6_3_0e0_0, -5.1_2_6_3e0_0],
] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def snake_case__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(lowerCAmelCase__ )
.eval()
)
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ )
_UpperCamelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCAmelCase__ , (1, 3, 800, 1088) )
with torch.no_grad():
_UpperCamelCase = model(**lowerCAmelCase__ )
# masks_queries_logits
_UpperCamelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_UpperCamelCase = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
_UpperCamelCase = torch.tensor(lowerCAmelCase__ ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
# class_queries_logits
_UpperCamelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_UpperCamelCase = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def snake_case__ ( self : Any ) -> int:
'''simple docstring'''
_UpperCamelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(lowerCAmelCase__ )
.eval()
)
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , )
_UpperCamelCase = inputs['''pixel_values'''].to(lowerCAmelCase__ )
_UpperCamelCase = [el.to(lowerCAmelCase__ ) for el in inputs['''mask_labels''']]
_UpperCamelCase = [el.to(lowerCAmelCase__ ) for el in inputs['''class_labels''']]
with torch.no_grad():
_UpperCamelCase = model(**lowerCAmelCase__ )
self.assertTrue(outputs.loss is not None )
| 287
|
'''simple docstring'''
from math import isclose, sqrt
def next_point( point_x : float, point_y : float, incoming_gradient : float ) -> tuple[float, float, float]:
    """Return the next reflection point on the ellipse 4x^2 + y^2 = 100 and the outgoing gradient."""
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x ) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
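# Sanity check sketch: each point returned by next_point stays on the ellipse
# 4x^2 + y^2 = 100 (values follow the problem's starting beam):
# x, y, m = next_point(1.4, -9.6, (10.1 - -9.6) / (0.0 - 1.4))
# assert isclose(4 * x * x + y * y, 100)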
def solution( first_x_coord : float = 1.4, first_y_coord : float = -9.6 ) -> int:
    """Count how many times the beam reflects before exiting through the top gap."""
    num_reflections = 0
    point_x = first_x_coord
    point_y = first_y_coord
    incoming_gradient = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x , point_y , incoming_gradient = next_point(point_x, point_y, incoming_gradient )
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
| 287
| 1
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict( config, input_ids, attention_mask=None, head_mask=None ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id ), tf.int8 )
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , embed_dim=16 , word_embed_proj_dim=16 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
def lowercase__ ( self : List[str] ):
lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCAmelCase : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=UpperCAmelCase_ , **self.config_updates , )
lowerCAmelCase : Any = prepare_opt_inputs_dict(UpperCAmelCase_ , UpperCAmelCase_ )
return config, inputs_dict
def lowercase__ ( self : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple ):
lowerCAmelCase : int = TFOPTModel(config=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = inputs_dict["""input_ids"""]
lowerCAmelCase : Optional[int] = input_ids[:1, :]
lowerCAmelCase : Optional[int] = inputs_dict["""attention_mask"""][:1, :]
lowerCAmelCase : List[Any] = 1
# first forward pass
lowerCAmelCase : int = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , use_cache=UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase : str = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
lowerCAmelCase : Optional[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCAmelCase : str = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCAmelCase : str = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )[0]
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCAmelCase : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx]
lowerCAmelCase : Union[str, Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCAmelCase_ , UpperCAmelCase_ , rtol=1E-3 )
@require_tf
class TFOPTModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFOPTModel, 'text-generation': TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : int = TFOPTModelTester(self )
lowerCAmelCase : Any = ConfigTester(self , config_class=UpperCAmelCase_ )
def lowercase__ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase_ )
def lowercase__ ( self : Dict ):
lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] ):
if hasattr(UpperCAmelCase_ , 'weight' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(UpperCAmelCase_ , 'weight' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
lowerCAmelCase : Any = model_class(config=UpperCAmelCase_ )
lowerCAmelCase : str = _get_word_embedding_weight(UpperCAmelCase_ , model.get_input_embeddings() )
lowerCAmelCase : int = _get_word_embedding_weight(UpperCAmelCase_ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(UpperCAmelCase_ )
lowerCAmelCase : Tuple = _get_word_embedding_weight(UpperCAmelCase_ , model.get_input_embeddings() )
lowerCAmelCase : Dict = _get_word_embedding_weight(UpperCAmelCase_ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
lowerCAmelCase : Optional[Any] = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , UpperCAmelCase_ )
# check that weights remain the same after resizing
lowerCAmelCase : int = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCAmelCase : str = False
self.assertTrue(UpperCAmelCase_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCAmelCase : List[str] = False
self.assertTrue(UpperCAmelCase_ )
def _long_tensor( tok_lst ):
    return tf.constant(tok_lst, dtype=tf.int32 )
@require_tf
class __A ( unittest.TestCase ):
    vocab_size = 99
    def _get_config_and_data( self ):
        eos_column_vector = tf.ones((4, 1) , dtype=tf.int32 ) * 2
        input_ids = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class __A ( unittest.TestCase ):
@slow
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Any = TFOPTModel.from_pretrained('facebook/opt-350m' )
lowerCAmelCase : Optional[int] = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
lowerCAmelCase : int = tf.not_equal(UpperCAmelCase_ , model.config.pad_token_id )
with tf.GradientTape():
lowerCAmelCase : Dict = model(input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ).last_hidden_state
lowerCAmelCase : List[str] = (1, 11, 512)
self.assertEqual(output.shape , UpperCAmelCase_ )
lowerCAmelCase : Dict = tf.constant(
[[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=4E-3 ) )
lowerCAmelCase : List[Any] = tf.function(UpperCAmelCase_ , jit_compile=UpperCAmelCase_ )
lowerCAmelCase : Tuple = xla_generate(UpperCAmelCase_ , UpperCAmelCase_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=4E-2 ) )
@require_tf
@slow
class __A ( unittest.TestCase ):
def lowercase__ ( self : str ):
super().setUp()
lowerCAmelCase : Tuple = """facebook/opt-350m"""
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Optional[Any] = TFOPTForCausalLM.from_pretrained(self.path_model )
lowerCAmelCase : Dict = GPTaTokenizer.from_pretrained(self.path_model )
lowerCAmelCase : Dict = [
"""Today is a beautiful day and I want to""",
"""In the city of""",
"""Paris is the capital of France and""",
"""Computers and mobile phones have taken""",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
lowerCAmelCase : Dict = tokenizer(UpperCAmelCase_ , return_tensors='tf' , padding=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase : List[str] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
lowerCAmelCase : Union[str, Any] = tf.constant(
[
[1.38_51, -13.8923, -10.5229, -10.7533, -0.23_09, -10.2384, -0.53_65, -9.09_47, -5.16_70],
[-4.70_73, -10.6276, -3.94_15, -21.5242, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.1650, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
[6.47_83, -1.99_13, -10.7926, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
] )
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-4 ) )
lowerCAmelCase : Dict = tf.function(UpperCAmelCase_ , jit_compile=UpperCAmelCase_ )
lowerCAmelCase : List[str] = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-4 ) )
@require_tf
@slow
class __A ( unittest.TestCase ):
@property
def lowercase__ ( self : Optional[Any] ):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def lowercase__ ( self : Dict ):
lowerCAmelCase : Tuple = """facebook/opt-125m"""
lowerCAmelCase : int = [
"""Today is a beautiful day and I want to""",
"""In the city of New York, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
lowerCAmelCase : Dict = []
lowerCAmelCase : Tuple = GPTaTokenizer.from_pretrained(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = TFOPTForCausalLM.from_pretrained(UpperCAmelCase_ )
for prompt in self.prompts:
lowerCAmelCase : List[str] = tokenizer(UpperCAmelCase_ , return_tensors='tf' ).input_ids
lowerCAmelCase : str = model.generate(UpperCAmelCase_ , max_length=10 )
lowerCAmelCase : Optional[Any] = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Any = """facebook/opt-350m"""
lowerCAmelCase : Union[str, Any] = GPTaTokenizer.from_pretrained(UpperCAmelCase_ )
lowerCAmelCase : str = TFOPTForCausalLM.from_pretrained(UpperCAmelCase_ )
lowerCAmelCase : Dict = """left"""
# use different length sentences to test batching
lowerCAmelCase : List[str] = [
"""Hello, my dog is a little""",
"""Today, I""",
]
lowerCAmelCase : str = tokenizer(UpperCAmelCase_ , return_tensors='tf' , padding=UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = inputs["""input_ids"""]
lowerCAmelCase : List[str] = model.generate(input_ids=UpperCAmelCase_ , attention_mask=inputs['attention_mask'] )
lowerCAmelCase : Union[str, Any] = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
lowerCAmelCase : Dict = model.generate(input_ids=UpperCAmelCase_ )
lowerCAmelCase : List[Any] = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['attention_mask'][-1] , tf.intaa ) )
lowerCAmelCase : Optional[Any] = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
lowerCAmelCase : List[str] = model.generate(input_ids=UpperCAmelCase_ , max_length=model.config.max_length - num_paddings )
lowerCAmelCase : Dict = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
lowerCAmelCase : Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCAmelCase_ )
lowerCAmelCase : Any = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = [
"""Hello, my dog is a little bit of a dork.\nI'm a little bit""",
"""Today, I was in the middle of a conversation with a friend about the""",
]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , [non_padded_sentence, padded_sentence] )
    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 138
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8, decoder_intermediate_size=2048, mask_ratio=0.75, norm_pix_loss=False, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
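# Minimal usage sketch (added for illustration, not part of the original file;
# it assumes the standard transformers config pattern, with ViTMAEModel defined
# elsewhere in the library):
#
#     from transformers import ViTMAEConfig, ViTMAEModel
#     config = ViTMAEConfig(mask_ratio=0.6)  # mask 60% of patches instead of the default 75%
#     model = ViTMAEModel(config)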
| 173
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_xlm_roberta_xl": [
"XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaXLConfig",
"XLMRobertaXLOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
"XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaXLForCausalLM",
"XLMRobertaXLForMaskedLM",
"XLMRobertaXLForMultipleChoice",
"XLMRobertaXLForQuestionAnswering",
"XLMRobertaXLForSequenceClassification",
"XLMRobertaXLForTokenClassification",
"XLMRobertaXLModel",
"XLMRobertaXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 2
|
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__A = "hf-internal-testing/tiny-random-bert"
__A = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
__A = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))
            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
| 2
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool


if is_datasets_available():
    from datasets import load_dataset


class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
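# Hedged usage sketch (added for illustration, not part of the original file;
# it assumes the PipelineTool calling convention, where an instance is invoked
# directly and chains encode -> forward -> decode):
#
#     tool = TextToSpeechTool()
#     waveform = tool("Hello, how are you today?")  # 1-D audio tensor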
| 98
|
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    """
    Sum the perimeters of all almost-equilateral Heronian triangles (sides
    (a, a, a +/- 1) with integral area) whose perimeter is at most max_perimeter.
    """
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        # advance the recurrence that generates the qualifying side lengths
        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
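# Hand-derived sanity check (not in the original source): the recurrence above
# yields perimeters 16, 50, 196, ..., so the only qualifying perimeters <= 100
# are 16 and 50, and solution(100) should therefore return 66.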
| 28
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 364
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


LANGUAGE_CODES = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES

    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
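# Hedged usage sketch (added for illustration, not part of the original file;
# it assumes the PipelineTool calling convention used elsewhere in this repo,
# with languages given in plain English as the description above requires):
#
#     translator = TranslationTool()
#     translator("Bonjour tout le monde", src_lang="French", tgt_lang="English")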
| 317
| 0
|
"""simple docstring"""
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
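# This script is usually started through the Accelerate CLI so the exact same
# code runs on CPU, single/multi GPU, or TPU, e.g. (file name assumed here;
# see the examples README linked above):
#
#     accelerate launch nlp_example.py --mixed_precision fp16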
| 61
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" FNet tokenizer, backed by HuggingFace's tokenizers library.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=True, unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs by adding special tokens: [CLS] X [SEP] (pair: [CLS] A [SEP] B [SEP]).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """
        Create token type IDs: 0 for the first sequence (with its special tokens), 1 for the second.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
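# Hedged usage sketch (added for illustration, not part of the original file;
# the checkpoint name is taken from the pretrained maps above):
#
#     tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")
#     tokenizer("Hello world")  # dict with input_ids and token_type_ids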
| 61
| 1
|
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.txt',
'merges_file': 'bpe.codes',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
},
'merges_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word, where the word is
    represented as a tuple of symbols (variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
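# Illustration (added for clarity, not from the original file): for the symbol
# tuple ("l", "o", "w"), get_pairs returns {("l", "o"), ("o", "w")} -- the
# candidate merges examined in each step of the bpe() method below.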
class PhobertTokenizer(PreTrainedTokenizer):
    """
    Construct a PhoBERT tokenizer, using Byte-Pair Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, merges_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs):
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs)

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from sequences: <s> X </s> (pair: <s> A </s></s> B </s>).
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """
        Retrieve a mask with 1 for special tokens and 0 for sequence tokens.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """
        Create token type IDs. PhoBERT, like RoBERTa, does not use them, so a list of zeros is returned.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string: whitespace-split, then apply BPE to each word."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens back into a single string, undoing the @@ BPE markers."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file
    def add_from_file(self, f):
        """
        Load a pre-existing dictionary from a text file and add its symbols to this instance's vocabulary.
        """
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
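# Hedged usage sketch (added for illustration, not part of the original file;
# the checkpoint name is taken from the pretrained maps above):
#
#     tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
#     tokenizer.tokenize("Xin chào")  # whitespace split + BPE with @@ continuation markers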
| 352
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Log metrics for one split and save them to ``{split}_results.json``."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )
    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 328
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
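# Note (added for clarity, not in the original file): replacing the module in
# sys.modules with a _LazyModule means that, for example,
# `from transformers.models.bloom import BloomModel` only pulls in the
# torch-heavy modeling code on first attribute access.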
| 348
|
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : Optional[Any] =self.scheduler_classes[0]
UpperCAmelCase : Optional[int] =self.get_scheduler_config(prediction_type='''v_prediction''' )
UpperCAmelCase : Optional[Any] =scheduler_class(**snake_case__ )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase : str =self.dummy_model()
UpperCAmelCase : Optional[Any] =self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase : Union[str, Any] =sample.to(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase : str =scheduler.scale_model_input(snake_case__ , snake_case__ )
UpperCAmelCase : Any =model(snake_case__ , snake_case__ )
UpperCAmelCase : Union[str, Any] =scheduler.step(snake_case__ , snake_case__ , snake_case__ )
UpperCAmelCase : int =output.prev_sample
UpperCAmelCase : Dict =torch.sum(torch.abs(snake_case__ ) )
UpperCAmelCase : Optional[Any] =torch.mean(torch.abs(snake_case__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.69_34e-07 ) < 1e-2
assert abs(result_mean.item() - 6.11_12e-10 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72e-07 ) < 1e-2
assert abs(result_mean.item() - 0.0002 ) < 1e-3
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
if torch_device == "mps":
return
UpperCAmelCase : Any =self.scheduler_classes[0]
UpperCAmelCase : Optional[int] =self.get_scheduler_config()
UpperCAmelCase : Optional[Any] =scheduler_class(**snake_case__ )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase : Optional[int] =self.dummy_model()
UpperCAmelCase : Union[str, Any] =self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase : str =sample.to(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase : Dict =scheduler.scale_model_input(snake_case__ , snake_case__ )
UpperCAmelCase : Union[str, Any] =model(snake_case__ , snake_case__ )
UpperCAmelCase : List[str] =scheduler.step(snake_case__ , snake_case__ , snake_case__ )
UpperCAmelCase : Optional[int] =output.prev_sample
UpperCAmelCase : Any =torch.sum(torch.abs(snake_case__ ) )
UpperCAmelCase : Union[str, Any] =torch.mean(torch.abs(snake_case__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
if torch_device == "mps":
return
UpperCAmelCase : List[Any] =self.scheduler_classes[0]
UpperCAmelCase : Dict =self.get_scheduler_config()
UpperCAmelCase : List[str] =scheduler_class(**snake_case__ )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case__ )
UpperCAmelCase : int =self.dummy_model()
UpperCAmelCase : Tuple =self.dummy_sample_deter.to(snake_case__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
UpperCAmelCase : Optional[Any] =scheduler.scale_model_input(snake_case__ , snake_case__ )
UpperCAmelCase : int =model(snake_case__ , snake_case__ )
UpperCAmelCase : str =scheduler.step(snake_case__ , snake_case__ , snake_case__ )
UpperCAmelCase : List[str] =output.prev_sample
UpperCAmelCase : List[str] =torch.sum(torch.abs(snake_case__ ) )
UpperCAmelCase : Dict =torch.mean(torch.abs(snake_case__ ) )
if str(snake_case__ ).startswith('''cpu''' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
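# Editor's sketch of the denoising loop the three tests above exercise, pulled
# out of the test harness. The scheduler calls (set_timesteps,
# scale_model_input, step) are exactly those used in the tests; `model` is a
# placeholder for any network that maps (sample, t) to a noise prediction of
# the sample's shape.
#
#     scheduler = KDPMaDiscreteScheduler(beta_schedule="linear")
#     scheduler.set_timesteps(10)
#     sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         scaled = scheduler.scale_model_input(sample, t)
#         noise_pred = model(scaled, t)
#         sample = scheduler.step(noise_pred, t, sample).prev_sample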
| 348
| 1
|
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : List[str] = DistilBertTokenizer
__UpperCamelCase : List[Any] = DistilBertTokenizerFast
__UpperCamelCase : List[str] = True
@slow
def lowerCAmelCase__ ( self : Tuple ):
        tokenizer = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
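# Editor's note: by subclassing BertTokenizationTest (imported above), the full
# BERT tokenizer test suite runs against the DistilBERT classes declared here;
# only the special-tokens layout ([CLS] x [SEP] for a single sequence and
# [CLS] x [SEP] y [SEP] for a pair) is asserted explicitly in this file.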
| 369
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase_ : Optional[int] = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""}
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : int = """ctrl"""
__UpperCamelCase : Dict = ["""past_key_values"""]
__UpperCamelCase : List[str] = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self , vocab_size=246_534 , n_positions=256 , n_embd=1280 , dff=8192 , n_layer=48 , n_head=16 , resid_pdrop=0.1 , embd_pdrop=0.1 , layer_norm_epsilon=1e-6 , initializer_range=0.02 , use_cache=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs )
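# Editor's note: the attribute_map declared above lets framework-generic code
# read the canonical names while CTRL keeps its historical ones. A minimal
# check (class is CTRLConfig upstream):
#     config = CTRLConfig(n_embd=1280)
#     assert config.hidden_size == 1280  # resolved through attribute_map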
| 223
| 0
|
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
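if __name__ == "__main__":
    # Editor's sketch: quick round trip with an illustrative key and message.
    # prepare_input() upper-cases, drops non-letters and pads with X, so
    # decode() returns that normalized form rather than the original sentence.
    key = "playfair example"
    message = "Hide the gold in the tree stump"
    ciphered = encode(message, key)
    print(f"Encoded: {ciphered}")
    print(f"Decoded: {decode(ciphered, key)}")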
| 182
|
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """Decorator that fires accelerate's pre_forward hook (if installed) before calling `method`."""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
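# Editor's usage sketch: applied as a decorator on a model method, the wrapper
# runs accelerate's pre_forward hook first (e.g. to move CPU-offloaded weights
# onto the execution device) and then delegates. `encode` is illustrative.
#
#     class MyModel(torch.nn.Module):
#         @apply_forward_hook
#         def encode(self, x):
#             ...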
| 182
| 1
|
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowerCamelCase ( unittest.TestCase ):
    def test_sorted(self) -> None:
        '''calc_profit takes (profit, weight, max_weight) and returns the greedy-knapsack gain.'''
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 1_00
        self.assertEqual(kp.calc_profit(profit , weight , max_weight ) , 2_10 )

    def test_negative_max_weight(self) -> None:
        '''A negative max_weight raises ValueError.'''
        self.assertRaisesRegex(ValueError , """max_weight must greater than zero.""" )

    def test_negative_weight_value(self) -> None:
        '''A negative weight value raises ValueError.'''
        self.assertRaisesRegex(ValueError , """Weight can not be negative.""" )

    def test_negative_profit_value(self) -> None:
        '''A negative profit value raises ValueError.'''
        self.assertRaisesRegex(ValueError , """Profit can not be negative.""" )

    def test_null_max_weight(self) -> None:
        '''A zero max_weight raises ValueError.'''
        self.assertRaisesRegex(ValueError , """max_weight must greater than zero.""" )

    def test_unequal_list_length(self) -> None:
        '''Mismatched profit/weight list lengths raise ValueError.'''
        self.assertRaisesRegex(
            ValueError , """The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
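# Editor's note: kp.calc_profit implements the greedy (fractional) knapsack —
# items are taken in decreasing profit/weight ratio until max_weight is
# exhausted. In test_sorted the weights sum to only 42 <= 100, so every item
# fits and the expected gain is the full 10+20+30+40+50+60 = 210.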
| 356
|
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case : List[str] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = XLMRobertaTokenizer
__snake_case = XLMRobertaTokenizerFast
__snake_case = True
__snake_case = True
def lowercase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
A__ : Any =XLMRobertaTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
A__ : Union[str, Any] ="""<pad>"""
A__ : Any =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowercase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
A__ : int =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(lowerCAmelCase_ ) , 10_02 )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
def lowercase__ ( self : Tuple ) -> Any:
'''simple docstring'''
A__ : List[Any] =XLMRobertaTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ )
A__ : Tuple =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A__ : Optional[int] =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A__ : Optional[int] =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
A__ : Union[str, Any] =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
A__ : Dict =(self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
A__ : List[str] =self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
A__ : Union[str, Any] =self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
A__ : Optional[Any] =tempfile.mkdtemp()
A__ : Union[str, Any] =tokenizer_r.save_pretrained(lowerCAmelCase_ )
A__ : Union[str, Any] =tokenizer_p.save_pretrained(lowerCAmelCase_ )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
A__ : List[str] =tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Checks everything loads correctly in the same way
A__ : Any =tokenizer_r.from_pretrained(lowerCAmelCase_ )
A__ : Union[str, Any] =tokenizer_p.from_pretrained(lowerCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCAmelCase_ )
# Save tokenizer rust, legacy_format=True
A__ : List[str] =tempfile.mkdtemp()
A__ : List[str] =tokenizer_r.save_pretrained(lowerCAmelCase_ , legacy_format=lowerCAmelCase_ )
A__ : List[Any] =tokenizer_p.save_pretrained(lowerCAmelCase_ )
                # Checks it saves with the same files
self.assertSequenceEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Checks everything loads correctly in the same way
A__ : str =tokenizer_r.from_pretrained(lowerCAmelCase_ )
A__ : List[Any] =tokenizer_p.from_pretrained(lowerCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) )
shutil.rmtree(lowerCAmelCase_ )
# Save tokenizer rust, legacy_format=False
A__ : List[str] =tempfile.mkdtemp()
A__ : Dict =tokenizer_r.save_pretrained(lowerCAmelCase_ , legacy_format=lowerCAmelCase_ )
A__ : List[Any] =tokenizer_p.save_pretrained(lowerCAmelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
A__ : Optional[int] =tokenizer_r.from_pretrained(lowerCAmelCase_ )
A__ : str =tokenizer_p.from_pretrained(lowerCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) )
shutil.rmtree(lowerCAmelCase_ )
@cached_property
def lowercase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" )
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase_ , f.name )
A__ : Dict =XLMRobertaTokenizer(f.name , keep_accents=lowerCAmelCase_ )
A__ : Optional[Any] =pickle.dumps(lowerCAmelCase_ )
pickle.loads(lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
A__ : Any =self.get_tokenizer()
A__ : Any =self.get_rust_tokenizer()
A__ : Optional[Any] ="""I was born in 92000, and this is falsé."""
A__ : List[str] =tokenizer.tokenize(lowerCAmelCase_ )
A__ : int =rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : str =tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
A__ : Dict =rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Union[str, Any] =self.get_rust_tokenizer()
A__ : Union[str, Any] =tokenizer.encode(lowerCAmelCase_ )
A__ : Optional[Any] =rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
A__ : Optional[Any] ="""Hello World!"""
A__ : Optional[Any] =[0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(lowerCAmelCase_ , self.big_tokenizer.encode(lowerCAmelCase_ ) )
@slow
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
A__ : List[Any] =(
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
A__ : Optional[Any] =[
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(lowerCAmelCase_ , self.big_tokenizer.encode(lowerCAmelCase_ ) )
@slow
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
# fmt: off
A__ : List[Any] ={"""input_ids""": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
| 136
| 0
|
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a = logging.get_logger(__name__)
_a = '''▁'''
_a = {'''vocab_file''': '''prophetnet.tokenizer'''}
_a = {
'''vocab_file''': {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'''
),
}
}
_a = {
'''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False},
}
_a = {
'''microsoft/xprophetnet-large-wiki100-cased''': 5_1_2,
}
def _a ( SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
"""simple docstring"""
__lowerCAmelCase: Optional[Any] = collections.OrderedDict()
with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as reader:
__lowerCAmelCase: Any = reader.readlines()
for index, token in enumerate(SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: Optional[Any] = token.rstrip('\n' )
__lowerCAmelCase: int = index
return vocab
class A_ ( snake_case__ ):
_lowercase : Dict = VOCAB_FILES_NAMES
_lowercase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowercase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Union[str, Any] = ['input_ids', 'attention_mask']
def __init__( self : List[str] , UpperCAmelCase : str , UpperCAmelCase : str="[SEP]" , UpperCAmelCase : Tuple="[SEP]" , UpperCAmelCase : Optional[int]="[SEP]" , UpperCAmelCase : int="[UNK]" , UpperCAmelCase : Optional[int]="[PAD]" , UpperCAmelCase : int="[CLS]" , UpperCAmelCase : int="[MASK]" , UpperCAmelCase : Optional[Dict[str, Any]] = None , **UpperCAmelCase : str , ) -> None:
__lowerCAmelCase: Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
__lowerCAmelCase: List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase ) )
__lowerCAmelCase: int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
__lowerCAmelCase: Tuple = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[UNK]': 3, '[MASK]': 4}
for i in range(1_0 ):
__lowerCAmelCase: Optional[int] = F'''[unused{i}]'''
__lowerCAmelCase: List[str] = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
__lowerCAmelCase: List[Any] = 1_2
__lowerCAmelCase: Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(UpperCAmelCase )
def __getstate__( self : Optional[int] ) -> int:
__lowerCAmelCase: Tuple = self.__dict__.copy()
__lowerCAmelCase: Tuple = None
return state
def __setstate__( self : int , UpperCAmelCase : List[Any] ) -> List[Any]:
__lowerCAmelCase: List[Any] = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__lowerCAmelCase: Dict = {}
__lowerCAmelCase: Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self : int , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return ([0] * len(UpperCAmelCase )) + [1]
return ([0] * len(UpperCAmelCase )) + [1] + ([0] * len(UpperCAmelCase )) + [1]
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
__lowerCAmelCase: List[str] = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCAmelCase ( self : Any ) -> List[Any]:
return len(self.sp_model ) + self.fairseq_offset
def UpperCAmelCase ( self : Any ) -> Any:
__lowerCAmelCase: List[Any] = {self.convert_ids_to_tokens(UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : str ) -> str:
return self.sp_model.encode(UpperCAmelCase , out_type=UpperCAmelCase )
def UpperCAmelCase ( self : int , UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__lowerCAmelCase: Optional[int] = self.sp_model.PieceToId(UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCAmelCase ( self : List[str] , UpperCAmelCase : int ) -> Optional[int]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : int ) -> Any:
        __lowerCAmelCase: Any = ''.join(UpperCAmelCase ).replace('▁' , ' ' ).strip()  # '▁' is the sentencepiece underline
return out_string
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(UpperCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
__lowerCAmelCase: Tuple = os.path.join(
UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase , 'wb' ) as fi:
__lowerCAmelCase: List[str] = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase )
return (out_vocab_file,)
def UpperCAmelCase ( self : str , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
__lowerCAmelCase: List[str] = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
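# Editor's note on the offset bookkeeping above: with fairseq_offset = 12,
# ids 0-4 are the special tokens, 5-14 the [unused] slots, and an spm piece id
# p maps to embedding id p + 12 — except spm id 0 (spm's own <unk>), which
# _convert_token_to_id redirects to unk_token_id. As the inline comment notes,
# the first "real" spm piece "," (spm id 3) therefore lands at position 15.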
| 322
|
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : int ) -> list[int]:
"""simple docstring"""
__lowerCAmelCase: int = 0
__lowerCAmelCase: Tuple = len(SCREAMING_SNAKE_CASE ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
__lowerCAmelCase: Tuple = i + 1
else:
__lowerCAmelCase: List[str] = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 1_1, 1_5], 9) = }")
| 322
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
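# Editor's note: installing the _LazyModule in sys.modules means a statement
# like `from transformers.models.swin import SwinModel` only imports
# modeling_swin at attribute-access time; the try/except blocks above simply
# drop the torch-only or TF-only symbols when that backend is not installed.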
| 366
|
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class _A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
__snake_case : List[Any] = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
                ],  # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
                ],  # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
__snake_case : int = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
__snake_case : Optional[Any] = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above
__snake_case : str = tf_top_k_top_p_filtering(__magic_name__ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
__snake_case : Dict = output[output != -float("""inf""" )]
__snake_case : Optional[Any] = tf.cast(
tf.where(tf.not_equal(__magic_name__ , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(__magic_name__ , __magic_name__ , rtol=1E-12 )
tf.debugging.assert_equal(__magic_name__ , __magic_name__ )
@require_tf
class _A ( unittest.TestCase , __lowercase ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
lowercase__: Tuple = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
def lowercase__ ( self : List[str] ) -> Any:
"""simple docstring"""
__snake_case : str = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : Optional[int] = 2
__snake_case : str = 2
class _A ( tf.Module ):
def __init__( self : str , __magic_name__ : Optional[int] ) -> Tuple:
"""simple docstring"""
super(__magic_name__ , self ).__init__()
__snake_case : Dict = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=__magic_name__ , )
def lowercase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : List[str] ) -> Dict:
"""simple docstring"""
__snake_case : Tuple = self.model.generate(
input_ids=__magic_name__ , attention_mask=__magic_name__ , max_new_tokens=__magic_name__ , return_dict_in_generate=__magic_name__ , )
return {"sequences": outputs["sequences"]}
__snake_case : int = [[2, 0], [1_02, 1_03]]
__snake_case : Tuple = [[1, 0], [1, 1]]
__snake_case : Union[str, Any] = DummyModel(model=__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__magic_name__ , __magic_name__ , signatures={"""serving_default""": dummy_model.serving} )
__snake_case : List[str] = tf.saved_model.load(__magic_name__ ).signatures["""serving_default"""]
for batch_size in range(1 , len(__magic_name__ ) + 1 ):
__snake_case : Union[str, Any] = {
"""input_ids""": tf.constant(dummy_input_ids[:batch_size] ),
"""attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ),
}
__snake_case : Tuple = serving_func(**__magic_name__ )["""sequences"""]
__snake_case : List[str] = test_model.generate(**__magic_name__ , max_new_tokens=__magic_name__ )
tf.debugging.assert_equal(__magic_name__ , __magic_name__ )
@slow
def lowercase__ ( self : Tuple ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : Dict = 1
__snake_case : int = 2
class _A ( tf.Module ):
def __init__( self : Tuple , __magic_name__ : List[str] ) -> int:
"""simple docstring"""
super(__magic_name__ , self ).__init__()
__snake_case : Optional[int] = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=__magic_name__ , )
def lowercase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[int] = self.model.generate(
input_ids=__magic_name__ , attention_mask=__magic_name__ , max_new_tokens=__magic_name__ , return_dict_in_generate=__magic_name__ , )
return {"sequences": outputs["sequences"]}
__snake_case : Union[str, Any] = [[2], [1_02, 1_03]]
__snake_case : Tuple = [[1], [1, 1]]
__snake_case : List[str] = DummyModel(model=__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__magic_name__ , __magic_name__ , signatures={"""serving_default""": dummy_model.serving} )
__snake_case : List[str] = tf.saved_model.load(__magic_name__ ).signatures["""serving_default"""]
for input_row in range(len(__magic_name__ ) ):
__snake_case : Tuple = {
"""input_ids""": tf.constant([dummy_input_ids[input_row]] ),
"""attention_mask""": tf.constant([dummy_attention_masks[input_row]] ),
}
__snake_case : str = serving_func(**__magic_name__ )["""sequences"""]
__snake_case : Union[str, Any] = test_model.generate(**__magic_name__ , max_new_tokens=__magic_name__ )
tf.debugging.assert_equal(__magic_name__ , __magic_name__ )
@slow
@require_tensorflow_text
def lowercase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=__magic_name__ )
class _A ( tf.keras.layers.Layer ):
def __init__( self : Optional[int] ) -> int:
"""simple docstring"""
super().__init__()
__snake_case : Any = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(__magic_name__ , """spiece.model""" ) , """rb""" ).read() )
__snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
def lowercase__ ( self : Any , __magic_name__ : List[Any] , *__magic_name__ : str , **__magic_name__ : Optional[int] ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = self.tokenizer.tokenize(__magic_name__ )
__snake_case , __snake_case : List[Any] = text.pad_model_inputs(
__magic_name__ , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
__snake_case : Optional[int] = self.model.generate(input_ids=__magic_name__ , attention_mask=__magic_name__ )
return self.tokenizer.detokenize(__magic_name__ )
__snake_case : int = CompleteSentenceTransformer()
__snake_case : Union[str, Any] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" )
__snake_case : Tuple = complete_model(__magic_name__ )
__snake_case : Optional[Any] = tf.keras.Model(__magic_name__ , __magic_name__ )
keras_model.save(__magic_name__ )
def lowercase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Dict = {
"""do_sample""": True,
"""num_beams""": 1,
"""top_p""": 0.7,
"""top_k""": 10,
"""temperature""": 0.7,
}
__snake_case : str = 14
__snake_case : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : int = """Hello, my dog is cute and"""
__snake_case : Any = tokenizer(__magic_name__ , return_tensors="""tf""" )
__snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : List[Any] = 6_38
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
__snake_case : int = model.generate(**__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
__snake_case : Dict = [6_38, 1_98]
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
__snake_case : Optional[int] = model.generate(**__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def lowercase__ ( self : Tuple ) -> str:
"""simple docstring"""
__snake_case : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
__snake_case : str = """Hugging Face is a technology company based in New York and Paris."""
__snake_case : str = bart_tokenizer(__magic_name__ , return_tensors="""tf""" ).input_ids
__snake_case : Union[str, Any] = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
__snake_case : int = bart_model.generate(__magic_name__ ).numpy()
class _A ( __lowercase ):
def lowercase__ ( self : int , __magic_name__ : Any , __magic_name__ : int=None , **__magic_name__ : int ) -> Optional[Any]:
"""simple docstring"""
return super().call(__magic_name__ , **__magic_name__ )
__snake_case : Union[str, Any] = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
__snake_case : Optional[Any] = bart_model.generate(__magic_name__ , foo="""bar""" ).numpy()
self.assertTrue(np.array_equal(__magic_name__ , __magic_name__ ) )
class _A ( bart_model.model.encoder.__class__ ):
def lowercase__ ( self : Optional[int] , __magic_name__ : Optional[int] , **__magic_name__ : Tuple ) -> Dict:
"""simple docstring"""
return super().call(__magic_name__ , **__magic_name__ )
__snake_case : List[Any] = FakeEncoder(bart_model.config , bart_model.model.shared )
__snake_case : Tuple = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
__snake_case : Dict = bart_model.generate(__magic_name__ ).numpy()
with self.assertRaises(__magic_name__ ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(__magic_name__ , foo="""bar""" )
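# Editor's sketch of the filtering contract checked by the first test above —
# roughly: keep the top_k highest logits, then the smallest set of those whose
# cumulative softmax mass stays within top_p, but never fewer than
# min_tokens_to_keep; every other position is set to -inf.
#
#     logits = tf.random.normal((1, 30))
#     filtered = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)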
| 13
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 69
|
'''simple docstring'''
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Check that every row and every column is sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Binary-search a decreasing row for the index of its first negative value."""
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
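# Editor's note on why the binary-search variant wins in benchmark(): each row
# needs only an O(log m) search (slice copies aside), and because rows and
# columns are both decreasing, the shrinking `bound` never re-examines columns
# already ruled out by a previous row — roughly O(n log m) overall versus
# O(n * m) for the plain brute-force count, matching the recorded timings.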
| 164
| 0
|
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase : Tuple = logging.get_logger(__name__)
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
__SCREAMING_SNAKE_CASE = 1_28
elif "12-12" in model_name:
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 12
elif "14-14" in model_name:
__SCREAMING_SNAKE_CASE = 14
__SCREAMING_SNAKE_CASE = 14
elif "16-16" in model_name:
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 16
else:
raise ValueError("""Model not supported""" )
__SCREAMING_SNAKE_CASE = 'huggingface/label-files'
if "speech-commands" in model_name:
__SCREAMING_SNAKE_CASE = 35
__SCREAMING_SNAKE_CASE = 'speech-commands-v2-id2label.json'
else:
__SCREAMING_SNAKE_CASE = 5_27
__SCREAMING_SNAKE_CASE = 'audioset-id2label.json'
__SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type="""dataset""" ) , """r""" ) )
__SCREAMING_SNAKE_CASE = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = idalabel
__SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
return config
def a__ ( a__ ):
"""simple docstring"""
if "module.v" in name:
__SCREAMING_SNAKE_CASE = name.replace("""module.v""" , """audio_spectrogram_transformer""" )
if "cls_token" in name:
__SCREAMING_SNAKE_CASE = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "dist_token" in name:
__SCREAMING_SNAKE_CASE = name.replace("""dist_token""" , """embeddings.distillation_token""" )
if "pos_embed" in name:
__SCREAMING_SNAKE_CASE = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
__SCREAMING_SNAKE_CASE = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
# transformer blocks
if "blocks" in name:
__SCREAMING_SNAKE_CASE = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
__SCREAMING_SNAKE_CASE = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
__SCREAMING_SNAKE_CASE = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
__SCREAMING_SNAKE_CASE = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__SCREAMING_SNAKE_CASE = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__SCREAMING_SNAKE_CASE = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__SCREAMING_SNAKE_CASE = name.replace("""mlp.fc2""" , """output.dense""" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
__SCREAMING_SNAKE_CASE = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" )
# classifier head
if "module.mlp_head.0" in name:
__SCREAMING_SNAKE_CASE = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" )
if "module.mlp_head.1" in name:
__SCREAMING_SNAKE_CASE = name.replace("""module.mlp_head.1""" , """classifier.dense""" )
return name
def a__ ( a__ , a__ ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__SCREAMING_SNAKE_CASE = orig_state_dict.pop(_UpperCAmelCase )
if "qkv" in key:
__SCREAMING_SNAKE_CASE = key.split(""".""" )
__SCREAMING_SNAKE_CASE = int(key_split[3] )
__SCREAMING_SNAKE_CASE = config.hidden_size
if "weight" in key:
__SCREAMING_SNAKE_CASE = val[:dim, :]
__SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE = val[:dim]
__SCREAMING_SNAKE_CASE = val[dim : dim * 2]
__SCREAMING_SNAKE_CASE = val[-dim:]
else:
__SCREAMING_SNAKE_CASE = val
return orig_state_dict
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [
'module.v.head.weight',
'module.v.head.bias',
'module.v.head_dist.weight',
'module.v.head_dist.bias',
]
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase , _UpperCAmelCase )
@torch.no_grad()
def a__ ( a__ , a__ , a__=False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_audio_spectrogram_transformer_config(_UpperCAmelCase )
__SCREAMING_SNAKE_CASE = {
'ast-finetuned-audioset-10-10-0.4593': (
'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.450': (
'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448': (
'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448-v2': (
'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
),
'ast-finetuned-audioset-12-12-0.447': (
'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
),
'ast-finetuned-audioset-14-14-0.443': (
'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
),
'ast-finetuned-audioset-16-16-0.442': (
'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
),
'ast-finetuned-speech-commands-v2': (
'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
),
}
# load original state_dict
__SCREAMING_SNAKE_CASE = model_name_to_url[model_name]
__SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(_UpperCAmelCase , map_location="""cpu""" )
# remove some keys
remove_keys(_UpperCAmelCase )
# rename some keys
__SCREAMING_SNAKE_CASE = convert_state_dict(_UpperCAmelCase , _UpperCAmelCase )
# load 🤗 model
__SCREAMING_SNAKE_CASE = ASTForAudioClassification(_UpperCAmelCase )
model.eval()
model.load_state_dict(_UpperCAmelCase )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
__SCREAMING_SNAKE_CASE = -4.2_677_393 if 'speech-commands' not in model_name else -6.845_978
__SCREAMING_SNAKE_CASE = 4.5_689_974 if 'speech-commands' not in model_name else 5.5_654_526
__SCREAMING_SNAKE_CASE = 10_24 if 'speech-commands' not in model_name else 1_28
__SCREAMING_SNAKE_CASE = ASTFeatureExtractor(mean=_UpperCAmelCase , std=_UpperCAmelCase , max_length=_UpperCAmelCase )
if "speech-commands" in model_name:
__SCREAMING_SNAKE_CASE = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" )
__SCREAMING_SNAKE_CASE = dataset[0]['audio']['array']
else:
__SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , )
__SCREAMING_SNAKE_CASE = torchaudio.load(_UpperCAmelCase )
__SCREAMING_SNAKE_CASE = waveform.squeeze().numpy()
__SCREAMING_SNAKE_CASE = feature_extractor(_UpperCAmelCase , sampling_rate=1_60_00 , return_tensors="""pt""" )
# forward pass
__SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase )
__SCREAMING_SNAKE_CASE = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
__SCREAMING_SNAKE_CASE = torch.tensor([-0.8_760, -7.0_042, -8.6_602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
__SCREAMING_SNAKE_CASE = torch.tensor([-1.1_986, -7.0_903, -8.2_718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
__SCREAMING_SNAKE_CASE = torch.tensor([-2.6_128, -8.0_080, -9.4_344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
__SCREAMING_SNAKE_CASE = torch.tensor([-1.5_080, -7.4_534, -8.8_917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
__SCREAMING_SNAKE_CASE = torch.tensor([-0.5_050, -6.5_833, -8.0_843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
__SCREAMING_SNAKE_CASE = torch.tensor([-0.3_826, -7.0_336, -8.2_413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
__SCREAMING_SNAKE_CASE = torch.tensor([-1.2_113, -6.9_101, -8.3_470] )
elif model_name == "ast-finetuned-speech-commands-v2":
__SCREAMING_SNAKE_CASE = torch.tensor([6.1_589, -8.0_566, -8.7_984] )
else:
raise ValueError("""Unknown model name""" )
if not torch.allclose(logits[0, :3] , _UpperCAmelCase , atol=1E-4 ):
raise ValueError("""Logits don\'t match""" )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_UpperCAmelCase )
print(F'Saving feature extractor to {pytorch_dump_folder_path}' )
feature_extractor.save_pretrained(_UpperCAmelCase )
if push_to_hub:
print("""Pushing model and feature extractor to the hub...""" )
model.push_to_hub(F'MIT/{model_name}' )
feature_extractor.push_to_hub(F'MIT/{model_name}' )
if __name__ == "__main__":
UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase : Dict = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
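# Editor's note — typical invocation (the script filename is assumed here;
# the flags match the argparse definitions above):
#     python convert_audio_spectrogram_transformer_original_to_pytorch.py \
#         --model_name ast-finetuned-audioset-10-10-0.4593 \
#         --pytorch_dump_folder_path ./ast-converted \
#         --push_to_hub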
| 350
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
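

if __name__ == "__main__":
    # Minimal usage sketch (an added illustration, not part of the original
    # module), assuming the usual `transformers` config/model workflow; the
    # defaults above mirror microsoft/markuplm-base.
    from transformers import MarkupLMModel

    config = MarkupLMConfig()
    model = MarkupLMModel(config)  # randomly initialized weights
    print(config.max_depth, config.xpath_unit_hidden_size)  # 50 32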
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric("mean_iou")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
_CITATION = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'


def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = label != ignore_index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label


def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label


def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels,
        ignore_index,
        nan_to_num=None,
        label_map=None,
        reduce_labels=False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
def longest_distance(graph):
    """Print the number of vertices on the longest path through a DAG (Kahn's algorithm)."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    # count incoming edges for every vertex
    for values in graph.values():
        for i in values:
            indegree[i] += 1

    # start from the vertices with no incoming edges
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
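

# The same routine as a reusable function that returns the answer instead of
# printing it; collections.deque replaces the O(n) list.pop(0). This variant
# is an added illustration, not part of the original snippet.
from collections import deque


def longest_path_length(graph):
    indegree = [0] * len(graph)
    for targets in graph.values():
        for t in targets:
            indegree[t] += 1
    dist = [1] * len(graph)  # longest path (counted in vertices) ending at each node
    queue = deque(v for v in graph if indegree[v] == 0)
    while queue:
        v = queue.popleft()
        for t in graph[v]:
            dist[t] = max(dist[t], dist[v] + 1)
            indegree[t] -= 1
            if indegree[t] == 0:
                queue.append(t)
    return max(dist)


assert longest_path_length(graph) == 5  # e.g. 0 -> 2 -> 5 -> 6 -> 7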
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    """Constructs an ERNIE-M tokenizer, based on SentencePiece."""

    model_input_names: List[str] = ["input_ids"]

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}

    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))

    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(piece)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)

    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx

    def save_vocabulary(self, save_directory, filename_prefix=None):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
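

if __name__ == "__main__":
    # Quick usage sketch (an added illustration, not part of the original
    # module). It assumes a `transformers` release that ships ERNIE-M and that
    # the susnato/ernie-m-base_pytorch checkpoint referenced above is available.
    from transformers import ErnieMTokenizer

    tokenizer = ErnieMTokenizer.from_pretrained("susnato/ernie-m-base_pytorch")
    encoded = tokenizer("ERNIE-M handles text in many languages.")
    print(encoded["input_ids"])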
'''simple docstring'''
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_compatible_try_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_try_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_compatible_extension_try_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_try_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            _ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])


def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_with_features():
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
def test_key_datatype(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_duplicate_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1}, key=10)
            writer.write({"col_1": "bar", "col_2": 2}, key=10)
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_write_with_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        writer.write({"col_1": "foo", "col_2": 1}, key=1)
        writer.write({"col_1": "bar", "col_2": 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_batch(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
        writer.write_batch({"col_1": [], "col_2": []})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_table(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_row(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)


def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value


@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype


@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype

    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()


@pytest.mark.parametrize("raise_exception", [False, True])
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    path = str(tmp_path / "dataset-train.arrow")
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed


def test_arrow_writer_with_filesystem(mockfs):
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)


def test_parquet_writer_write():
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}


@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None


def test_always_nullable():
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])

    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)

    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
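

if __name__ == "__main__":
    # Illustration of what the lazy module buys us (an added sketch, assuming a
    # `transformers` release that ships Falcon): touching the attribute is what
    # triggers the real submodule import, keeping `import transformers` cheap.
    from transformers import FalconConfig

    config = FalconConfig()  # default hyperparameters
    print(type(config).__name__)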
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
__lowerCamelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowerCamelCase = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
__lowerCamelCase = {
"""unc-nlp/lxmert-base-uncased""": 5_12,
}
__lowerCamelCase = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class UpperCAmelCase ( A_ ):
A__ : Any = VOCAB_FILES_NAMES
A__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A__ : Tuple = PRETRAINED_INIT_CONFIGURATION
A__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : List[Any] = LxmertTokenizer
def __init__(self : Dict , snake_case__ : Tuple=None , snake_case__ : Optional[Any]=None , snake_case__ : Optional[Any]=True , snake_case__ : Tuple="[UNK]" , snake_case__ : Optional[Any]="[SEP]" , snake_case__ : Optional[Any]="[PAD]" , snake_case__ : List[Any]="[CLS]" , snake_case__ : Tuple="[MASK]" , snake_case__ : Dict=True , snake_case__ : Union[str, Any]=None , **snake_case__ : Dict , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
snake_case__ , tokenizer_file=snake_case__ , do_lower_case=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , tokenize_chinese_chars=snake_case__ , strip_accents=snake_case__ , **snake_case__ , )
snake_case : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , snake_case__ ) != do_lower_case
or normalizer_state.get("strip_accents" , snake_case__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , snake_case__ ) != tokenize_chinese_chars
):
snake_case : Union[str, Any] = getattr(snake_case__ , normalizer_state.pop("type" ) )
snake_case : str = do_lower_case
snake_case : List[Any] = strip_accents
snake_case : Optional[int] = tokenize_chinese_chars
snake_case : int = normalizer_class(**snake_case__ )
snake_case : Optional[Any] = do_lower_case
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Dict=None ) -> Any:
'''simple docstring'''
snake_case : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case : Optional[Any] = [self.sep_token_id]
snake_case : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
snake_case : List[Any] = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
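

if __name__ == "__main__":
    # Minimal usage sketch (an added illustration, not part of the original
    # module): the fast tokenizer builds BERT-style [CLS] ... [SEP] inputs.
    tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
    ids = tokenizer.build_inputs_with_special_tokens(tokenizer.convert_tokens_to_ids(["hello", "world"]))
    print(tokenizer.convert_ids_to_tokens(ids))  # ['[CLS]', 'hello', 'world', '[SEP]']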
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1


class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)

    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))

    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
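

if __name__ == "__main__":
    # Standalone sketch of the hook mechanism exercised by the tests above (an
    # added illustration, not part of the original test file): a custom
    # pre-forward hook rewrites the input before the wrapped module runs, and
    # removal restores the original forward.
    class ScaleInputHook(ModelHook):
        def pre_forward(self, module, *args, **kwargs):
            return (args[0] / 2,) + args[1:], kwargs

    layer = nn.Linear(3, 3)
    add_hook_to_module(layer, ScaleInputHook())
    _ = layer(torch.randn(2, 3))
    remove_hook_from_module(layer)  # layer.forward is back to the original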
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Tuple = logging.get_logger(__name__)
a__ : List[Any] = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
snake_case__ : Any = "efficientformer"
def __init__( self : Any , UpperCAmelCase__ : List[int] = [3, 2, 6, 4] , UpperCAmelCase__ : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , UpperCAmelCase__ : List[bool] = [True, True, True, True] , UpperCAmelCase__ : int = 4_4_8 , UpperCAmelCase__ : int = 3_2 , UpperCAmelCase__ : int = 4 , UpperCAmelCase__ : int = 7 , UpperCAmelCase__ : int = 5 , UpperCAmelCase__ : int = 8 , UpperCAmelCase__ : int = 4 , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : int = 1_6 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : float = 1E-5 , UpperCAmelCase__ : str = "gelu" , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : float = 1E-12 , UpperCAmelCase__ : int = 2_2_4 , UpperCAmelCase__ : float = 1E-05 , **UpperCAmelCase__ : Tuple , ) -> None:
super().__init__(**UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = hidden_sizes
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = depths
__SCREAMING_SNAKE_CASE = mlp_expansion_ratio
__SCREAMING_SNAKE_CASE = downsamples
__SCREAMING_SNAKE_CASE = dim
__SCREAMING_SNAKE_CASE = key_dim
__SCREAMING_SNAKE_CASE = attention_ratio
__SCREAMING_SNAKE_CASE = resolution
__SCREAMING_SNAKE_CASE = pool_size
__SCREAMING_SNAKE_CASE = downsample_patch_size
__SCREAMING_SNAKE_CASE = downsample_stride
__SCREAMING_SNAKE_CASE = downsample_pad
__SCREAMING_SNAKE_CASE = drop_path_rate
__SCREAMING_SNAKE_CASE = num_metaad_blocks
__SCREAMING_SNAKE_CASE = distillation
__SCREAMING_SNAKE_CASE = use_layer_scale
__SCREAMING_SNAKE_CASE = layer_scale_init_value
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = batch_norm_eps
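
# Hedged, self-contained sketch (not from the original file) of the pattern the
# config above follows: __init__ stores each named argument as an attribute and
# forwards everything else to the base class. All names below are illustrative.
class _MiniConfig:
    def __init__(self, hidden_act: str = "gelu", num_channels: int = 3, **kwargs):
        self.hidden_act = hidden_act
        self.num_channels = num_channels
        self.extra_kwargs = kwargs  # PretrainedConfig.__init__ would consume these


if __name__ == "__main__":
    _cfg = _MiniConfig(num_channels=1)
    print(_cfg.num_channels, _cfg.hidden_act)  # -> 1 gelu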
| 195
| 0
|
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    '''simple docstring'''

    model_type = "new-model"


if is_tf_available():

    class NewModel(TFBertModel):
        '''simple docstring'''

        config_class = NewModelConfig
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """bert-base-cased"""
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """bert-base-cased"""
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = TFAutoModelForPreTraining.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[str]:
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained(lowerCamelCase__ ,output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase__ ,output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Tuple:
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ ,output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = TFAutoModelForSequenceClassification.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = TFAutoModelForQuestionAnswering.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
@slow
@require_tensorflow_probability
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = TFAutoModelForTableQuestionAnswering.from_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = TFAutoModelForTableQuestionAnswering.from_pretrained(
lowerCamelCase__ ,output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
'''simple docstring'''
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, NewModel)
                    auto_class.register(NewModelConfig, NewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, NewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, NewModel)
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ ,"""bert-base is not a local folder and is not a valid model identifier""" ):
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("""bert-base""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ ,R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ ,"""hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin""" ,):
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
with self.assertRaisesRegex(lowerCamelCase__ ,"""Use `from_pt=True` to load this model""" ):
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 )
# With a sharded checkpoint
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
with RequestCounter() as counter:
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 )
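
# Hedged, self-contained sketch (not transformers' actual implementation) of the
# registration pattern the tests above exercise: a factory keeps a mapping from
# a string model type to a class and rejects duplicate registrations.
_MODEL_TYPE_REGISTRY: dict = {}


def _register(model_type: str, cls: type) -> None:
    if model_type in _MODEL_TYPE_REGISTRY:
        raise ValueError(f"'{model_type}' is already registered")
    _MODEL_TYPE_REGISTRY[model_type] = cls


def _from_model_type(model_type: str, **kwargs):
    # Look up the registered class and instantiate it, mirroring from_config.
    return _MODEL_TYPE_REGISTRY[model_type](**kwargs)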
| 296
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Dict = "git_vision_model"
def __init__( self : List[Any] ,lowerCamelCase__ : Dict=768 ,lowerCamelCase__ : Union[str, Any]=3072 ,lowerCamelCase__ : Optional[int]=12 ,lowerCamelCase__ : Tuple=12 ,lowerCamelCase__ : Tuple=3 ,lowerCamelCase__ : Optional[Any]=224 ,lowerCamelCase__ : Union[str, Any]=16 ,lowerCamelCase__ : List[Any]="quick_gelu" ,lowerCamelCase__ : Optional[Any]=1e-5 ,lowerCamelCase__ : str=0.0 ,lowerCamelCase__ : Optional[int]=0.02 ,**lowerCamelCase__ : Union[str, Any] ,) -> Optional[int]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = hidden_act
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple ,lowerCamelCase__ : Union[str, os.PathLike] ,**lowerCamelCase__ : int ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(lowerCamelCase__ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = cls.get_config_dict(lowerCamelCase__ ,**lowerCamelCase__ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
SCREAMING_SNAKE_CASE = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCamelCase__ ,**lowerCamelCase__ )
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Dict = "git"
def __init__( self : Optional[int] ,lowerCamelCase__ : int=None ,lowerCamelCase__ : str=30522 ,lowerCamelCase__ : Tuple=768 ,lowerCamelCase__ : Union[str, Any]=6 ,lowerCamelCase__ : str=12 ,lowerCamelCase__ : List[str]=3072 ,lowerCamelCase__ : Dict="gelu" ,lowerCamelCase__ : Tuple=0.1 ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : List[str]=1024 ,lowerCamelCase__ : List[str]=0.02 ,lowerCamelCase__ : str=1e-1_2 ,lowerCamelCase__ : Optional[int]=0 ,lowerCamelCase__ : Optional[int]="absolute" ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : str=False ,lowerCamelCase__ : int=101 ,lowerCamelCase__ : int=102 ,lowerCamelCase__ : Dict=None ,**lowerCamelCase__ : List[Any] ,) -> Optional[Any]:
'''simple docstring'''
super().__init__(bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,pad_token_id=lowerCamelCase__ ,**lowerCamelCase__ )
if vision_config is None:
SCREAMING_SNAKE_CASE = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
SCREAMING_SNAKE_CASE = GitVisionConfig(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = position_embedding_type
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = tie_word_embeddings
SCREAMING_SNAKE_CASE = num_image_with_embedding
SCREAMING_SNAKE_CASE = bos_token_id
SCREAMING_SNAKE_CASE = eos_token_id
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
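
# Hedged, self-contained sketch (not from the classes above) of the nested-config
# serialization pattern implemented in the final method: deep-copy __dict__, then
# replace the nested config object with its own dict representation.
class _VisionCfg:
    def __init__(self):
        self.hidden_size = 768

    def to_dict(self):
        return dict(self.__dict__)


class _CompositeCfg:
    model_type = "git"

    def __init__(self):
        self.vision_config = _VisionCfg()

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)  # `copy` is imported at the top of this file
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


# _CompositeCfg().to_dict() -> {'vision_config': {'hidden_size': 768}, 'model_type': 'git'}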
| 296
| 1
|
"""simple docstring"""
from typing import Any
def UpperCamelCase(input_list: list) -> list[Any]:
    """simple docstring"""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
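
# Hedged examples (not in the original): all tied modes are returned, sorted.
assert UpperCamelCase([2, 2, 3, 3, 5]) == [2, 3]
assert UpperCamelCase([]) == []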
if __name__ == "__main__":
import doctest
doctest.testmod()
| 303
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
| 1
|
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
| 203
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : torch.FloatTensor
class A ( nn.Module ):
def __init__(self : Union[str, Any] , __UpperCAmelCase : int=3 , __UpperCAmelCase : Dict=3 , __UpperCAmelCase : Optional[Any]=("DownEncoderBlock2D",) , __UpperCAmelCase : int=(6_4,) , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : Any=3_2 , __UpperCAmelCase : str="silu" , __UpperCAmelCase : Any=True , ) -> Dict:
"""simple docstring"""
super().__init__()
UpperCAmelCase__ = layers_per_block
UpperCAmelCase__ = torch.nn.Convad(
__UpperCAmelCase , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
UpperCAmelCase__ = None
UpperCAmelCase__ = nn.ModuleList([] )
# down
UpperCAmelCase__ = block_out_channels[0]
for i, down_block_type in enumerate(__UpperCAmelCase ):
UpperCAmelCase__ = output_channel
UpperCAmelCase__ = block_out_channels[i]
UpperCAmelCase__ = i == len(__UpperCAmelCase ) - 1
UpperCAmelCase__ = get_down_block(
__UpperCAmelCase , num_layers=self.layers_per_block , in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=__UpperCAmelCase , resnet_groups=__UpperCAmelCase , attention_head_dim=__UpperCAmelCase , temb_channels=__UpperCAmelCase , )
self.down_blocks.append(__UpperCAmelCase )
# mid
UpperCAmelCase__ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=__UpperCAmelCase , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=__UpperCAmelCase , temb_channels=__UpperCAmelCase , )
# out
UpperCAmelCase__ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__UpperCAmelCase , eps=1E-6 )
UpperCAmelCase__ = nn.SiLU()
UpperCAmelCase__ = 2 * out_channels if double_z else out_channels
UpperCAmelCase__ = nn.Convad(block_out_channels[-1] , __UpperCAmelCase , 3 , padding=1 )
UpperCAmelCase__ = False
def lowercase_ (self : List[Any] , __UpperCAmelCase : int ) -> str:
"""simple docstring"""
UpperCAmelCase__ = x
UpperCAmelCase__ = self.conv_in(__UpperCAmelCase )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__UpperCAmelCase : int ):
def custom_forward(*__UpperCAmelCase : Optional[Any] ):
return module(*__UpperCAmelCase )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase , use_reentrant=__UpperCAmelCase )
# middle
UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __UpperCAmelCase , use_reentrant=__UpperCAmelCase )
else:
for down_block in self.down_blocks:
UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase )
# middle
UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __UpperCAmelCase )
else:
# down
for down_block in self.down_blocks:
UpperCAmelCase__ = down_block(__UpperCAmelCase )
# middle
UpperCAmelCase__ = self.mid_block(__UpperCAmelCase )
# post-process
UpperCAmelCase__ = self.conv_norm_out(__UpperCAmelCase )
UpperCAmelCase__ = self.conv_act(__UpperCAmelCase )
UpperCAmelCase__ = self.conv_out(__UpperCAmelCase )
return sample
class A ( nn.Module ):
def __init__(self : List[Any] , __UpperCAmelCase : str=3 , __UpperCAmelCase : Union[str, Any]=3 , __UpperCAmelCase : Optional[int]=("UpDecoderBlock2D",) , __UpperCAmelCase : str=(6_4,) , __UpperCAmelCase : Optional[Any]=2 , __UpperCAmelCase : Tuple=3_2 , __UpperCAmelCase : Any="silu" , __UpperCAmelCase : Any="group" , ) -> Dict:
"""simple docstring"""
super().__init__()
UpperCAmelCase__ = layers_per_block
UpperCAmelCase__ = nn.Convad(
__UpperCAmelCase , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
UpperCAmelCase__ = None
UpperCAmelCase__ = nn.ModuleList([] )
UpperCAmelCase__ = in_channels if norm_type == "spatial" else None
# mid
UpperCAmelCase__ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=__UpperCAmelCase , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__UpperCAmelCase , temb_channels=__UpperCAmelCase , )
# up
UpperCAmelCase__ = list(reversed(__UpperCAmelCase ) )
UpperCAmelCase__ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__UpperCAmelCase ):
UpperCAmelCase__ = output_channel
UpperCAmelCase__ = reversed_block_out_channels[i]
UpperCAmelCase__ = i == len(__UpperCAmelCase ) - 1
UpperCAmelCase__ = get_up_block(
__UpperCAmelCase , num_layers=self.layers_per_block + 1 , in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , prev_output_channel=__UpperCAmelCase , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=__UpperCAmelCase , resnet_groups=__UpperCAmelCase , attention_head_dim=__UpperCAmelCase , temb_channels=__UpperCAmelCase , resnet_time_scale_shift=__UpperCAmelCase , )
self.up_blocks.append(__UpperCAmelCase )
UpperCAmelCase__ = output_channel
# out
if norm_type == "spatial":
UpperCAmelCase__ = SpatialNorm(block_out_channels[0] , __UpperCAmelCase )
else:
UpperCAmelCase__ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__UpperCAmelCase , eps=1E-6 )
UpperCAmelCase__ = nn.SiLU()
UpperCAmelCase__ = nn.Convad(block_out_channels[0] , __UpperCAmelCase , 3 , padding=1 )
UpperCAmelCase__ = False
def lowercase_ (self : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict=None ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = z
UpperCAmelCase__ = self.conv_in(__UpperCAmelCase )
UpperCAmelCase__ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__UpperCAmelCase : str ):
def custom_forward(*__UpperCAmelCase : List[str] ):
return module(*__UpperCAmelCase )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __UpperCAmelCase , __UpperCAmelCase , use_reentrant=__UpperCAmelCase )
UpperCAmelCase__ = sample.to(__UpperCAmelCase )
# up
for up_block in self.up_blocks:
UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase , use_reentrant=__UpperCAmelCase )
else:
# middle
UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase__ = sample.to(__UpperCAmelCase )
# up
for up_block in self.up_blocks:
UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase )
else:
# middle
UpperCAmelCase__ = self.mid_block(__UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase__ = sample.to(__UpperCAmelCase )
# up
for up_block in self.up_blocks:
UpperCAmelCase__ = up_block(__UpperCAmelCase , __UpperCAmelCase )
# post-process
if latent_embeds is None:
UpperCAmelCase__ = self.conv_norm_out(__UpperCAmelCase )
else:
UpperCAmelCase__ = self.conv_norm_out(__UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase__ = self.conv_act(__UpperCAmelCase )
UpperCAmelCase__ = self.conv_out(__UpperCAmelCase )
return sample
class A ( nn.Module ):
def __init__(self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Union[str, Any]="random" , __UpperCAmelCase : Dict=False , __UpperCAmelCase : Union[str, Any]=True ) -> Dict:
"""simple docstring"""
super().__init__()
UpperCAmelCase__ = n_e
UpperCAmelCase__ = vq_embed_dim
UpperCAmelCase__ = beta
UpperCAmelCase__ = legacy
UpperCAmelCase__ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
UpperCAmelCase__ = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
UpperCAmelCase__ = self.used.shape[0]
UpperCAmelCase__ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
UpperCAmelCase__ = self.re_embed
UpperCAmelCase__ = self.re_embed + 1
print(
f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
f"""Using {self.unknown_index} for unknown indices.""" )
else:
UpperCAmelCase__ = n_e
UpperCAmelCase__ = sane_index_shape
def lowercase_ (self : str , __UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = inds.shape
assert len(__UpperCAmelCase ) > 1
UpperCAmelCase__ = inds.reshape(ishape[0] , -1 )
UpperCAmelCase__ = self.used.to(__UpperCAmelCase )
UpperCAmelCase__ = (inds[:, :, None] == used[None, None, ...]).long()
UpperCAmelCase__ = match.argmax(-1 )
UpperCAmelCase__ = match.sum(2 ) < 1
if self.unknown_index == "random":
UpperCAmelCase__ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
UpperCAmelCase__ = self.unknown_index
return new.reshape(__UpperCAmelCase )
def lowercase_ (self : Tuple , __UpperCAmelCase : Optional[int] ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = inds.shape
assert len(__UpperCAmelCase ) > 1
UpperCAmelCase__ = inds.reshape(ishape[0] , -1 )
UpperCAmelCase__ = self.used.to(__UpperCAmelCase )
if self.re_embed > self.used.shape[0]: # extra token
UpperCAmelCase__ = 0 # simply set to zero
UpperCAmelCase__ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __UpperCAmelCase )
return back.reshape(__UpperCAmelCase )
def lowercase_ (self : Optional[Any] , __UpperCAmelCase : Dict ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = z.permute(0 , 2 , 3 , 1 ).contiguous()
UpperCAmelCase__ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
UpperCAmelCase__ = torch.argmin(torch.cdist(__UpperCAmelCase , self.embedding.weight ) , dim=1 )
UpperCAmelCase__ = self.embedding(__UpperCAmelCase ).view(z.shape )
UpperCAmelCase__ = None
UpperCAmelCase__ = None
# compute loss for embedding
if not self.legacy:
UpperCAmelCase__ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
UpperCAmelCase__ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
UpperCAmelCase__ = z + (z_q - z).detach()
# reshape back to match original input shape
UpperCAmelCase__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
UpperCAmelCase__ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
UpperCAmelCase__ = self.remap_to_used(__UpperCAmelCase )
UpperCAmelCase__ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
UpperCAmelCase__ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowercase_ (self : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] ) -> Any:
"""simple docstring"""
if self.remap is not None:
UpperCAmelCase__ = indices.reshape(shape[0] , -1 ) # add batch axis
UpperCAmelCase__ = self.unmap_to_all(__UpperCAmelCase )
UpperCAmelCase__ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
UpperCAmelCase__ = self.embedding(__UpperCAmelCase )
if shape is not None:
UpperCAmelCase__ = z_q.view(__UpperCAmelCase )
# reshape back to match original input shape
UpperCAmelCase__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
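
# Hedged, minimal sketch (not from the quantizer above) of the nearest-codebook
# lookup it performs with torch.cdist + argmin:
if __name__ == "__main__":
    _codebook = torch.randn(8, 4)  # 8 codebook entries of dimension 4
    _z_flat = torch.randn(3, 4)    # 3 flattened latent vectors
    _idx = torch.argmin(torch.cdist(_z_flat, _codebook), dim=1)
    _z_q = _codebook[_idx]         # quantized vectors, shape (3, 4)
    print(_idx.shape, _z_q.shape)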
class A ( UpperCAmelCase_ ):
def __init__(self : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : str=False ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = parameters
UpperCAmelCase__ , UpperCAmelCase__ = torch.chunk(__UpperCAmelCase , 2 , dim=1 )
UpperCAmelCase__ = torch.clamp(self.logvar , -30.0 , 20.0 )
UpperCAmelCase__ = deterministic
UpperCAmelCase__ = torch.exp(0.5 * self.logvar )
UpperCAmelCase__ = torch.exp(self.logvar )
if self.deterministic:
UpperCAmelCase__ = UpperCAmelCase__ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : Optional[torch.Generator] = None ) -> torch.FloatTensor:
"""simple docstring"""
UpperCAmelCase__ = randn_tensor(
self.mean.shape , generator=__UpperCAmelCase , device=self.parameters.device , dtype=self.parameters.dtype )
UpperCAmelCase__ = self.mean + self.std * sample
return x
def lowercase_ (self : str , __UpperCAmelCase : int=None ) -> Any:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def lowercase_ (self : Dict , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any=[1, 2, 3] ) -> Dict:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
UpperCAmelCase__ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__UpperCAmelCase )
def lowercase_ (self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return self.mean
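
# Hedged, minimal sketch (not from the class above) of the reparameterization
# trick its sampling method implements: sample = mean + std * eps, where
# std = exp(0.5 * logvar) and eps ~ N(0, I).
if __name__ == "__main__":
    _mean = torch.zeros(2, 4)
    _logvar = torch.zeros(2, 4)  # std = exp(0.5 * 0) = 1
    _eps = torch.randn_like(_mean)
    _sample = _mean + torch.exp(0.5 * _logvar) * _eps
    print(_sample.shape)  # torch.Size([2, 4])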
| 65
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Any = botoa.client("iam" )
lowercase__ : str = {
"Version": "2012-10-17",
"Statement": [
{"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=lowerCamelCase__ , AssumeRolePolicyDocument=json.dumps(lowerCamelCase__ , indent=2 ) )
lowercase__ : Tuple = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"sagemaker:*",
"ecr:GetDownloadUrlForLayer",
"ecr:BatchGetImage",
"ecr:BatchCheckLayerAvailability",
"ecr:GetAuthorizationToken",
"cloudwatch:PutMetricData",
"cloudwatch:GetMetricData",
"cloudwatch:GetMetricStatistics",
"cloudwatch:ListMetrics",
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogStreams",
"logs:PutLogEvents",
"logs:GetLogEvents",
"s3:CreateBucket",
"s3:ListBucket",
"s3:GetBucketLocation",
"s3:GetObject",
"s3:PutObject",
],
"Resource": "*",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=lowerCamelCase__ , PolicyName=F"""{role_name}_policy_permission""" , PolicyDocument=json.dumps(lowerCamelCase__ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F"""role {role_name} already exists. Using existing one""" )
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : List[Any] = botoa.client("iam" )
return iam_client.get_role(RoleName=lowerCamelCase__ )["Role"]["Arn"]
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : str = _ask_options(
"How do you want to authorize?" , ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "] , lowerCamelCase__ , )
lowercase__ : Any = None
if credentials_configuration == 0:
lowercase__ : int = _ask_field("Enter your AWS Profile name: [default] " , default="default" )
lowercase__ : Tuple = aws_profile
else:
print(
"Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
"`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" )
lowercase__ : Any = _ask_field("AWS Access Key ID: " )
lowercase__ : List[Any] = aws_access_key_id
lowercase__ : int = _ask_field("AWS Secret Access Key: " )
lowercase__ : Optional[Any] = aws_secret_access_key
lowercase__ : Any = _ask_field("Enter your AWS Region: [us-east-1]" , default="us-east-1" )
lowercase__ : Union[str, Any] = aws_region
lowercase__ : Tuple = _ask_options(
"Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?" , ["Provide IAM Role name", "Create new IAM role using credentials"] , lowerCamelCase__ , )
if role_management == 0:
lowercase__ : Tuple = _ask_field("Enter your IAM role name: " )
else:
lowercase__ : Optional[int] = "accelerate_sagemaker_execution_role"
print(F"""Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials""" )
_create_iam_role_for_sagemaker(lowerCamelCase__ )
lowercase__ : int = _ask_field(
"Do you want to use custom Docker image? [yes/NO]: " , _convert_yes_no_to_bool , default=lowerCamelCase__ , error_message="Please enter yes or no." , )
lowercase__ : Any = None
if is_custom_docker_image:
lowercase__ : Tuple = _ask_field("Enter your Docker image: " , lambda lowerCamelCase__ : str(lowerCamelCase__ ).lower() )
lowercase__ : Tuple = _ask_field(
"Do you want to provide SageMaker input channels with data locations? [yes/NO]: " , _convert_yes_no_to_bool , default=lowerCamelCase__ , error_message="Please enter yes or no." , )
lowercase__ : str = None
if is_sagemaker_inputs_enabled:
lowercase__ : str = _ask_field(
"Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): " , lambda lowerCamelCase__ : str(lowerCamelCase__ ).lower() , )
lowercase__ : Tuple = _ask_field(
"Do you want to enable SageMaker metrics? [yes/NO]: " , _convert_yes_no_to_bool , default=lowerCamelCase__ , error_message="Please enter yes or no." , )
lowercase__ : List[str] = None
if is_sagemaker_metrics_enabled:
lowercase__ : Dict = _ask_field(
"Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): " , lambda lowerCamelCase__ : str(lowerCamelCase__ ).lower() , )
lowercase__ : int = _ask_options(
"What is the distributed mode?" , ["No distributed training", "Data parallelism"] , _convert_sagemaker_distributed_mode , )
lowercase__ : Union[str, Any] = {}
lowercase__ : Union[str, Any] = _ask_field(
"Do you wish to optimize your script with torch dynamo?[yes/NO]:" , _convert_yes_no_to_bool , default=lowerCamelCase__ , error_message="Please enter yes or no." , )
if use_dynamo:
lowercase__ : int = "dynamo_"
lowercase__ : Optional[int] = _ask_options(
"Which dynamo backend would you like to use?" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
lowercase__ : Optional[Any] = _ask_field(
"Do you want to customize the defaults sent to torch.compile? [yes/NO]: " , _convert_yes_no_to_bool , default=lowerCamelCase__ , error_message="Please enter yes or no." , )
if use_custom_options:
lowercase__ : int = _ask_options(
"Which mode do you want to use?" , lowerCamelCase__ , lambda lowerCamelCase__ : TORCH_DYNAMO_MODES[int(lowerCamelCase__ )] , default="default" , )
lowercase__ : Any = _ask_field(
"Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: " , _convert_yes_no_to_bool , default=lowerCamelCase__ , error_message="Please enter yes or no." , )
lowercase__ : Optional[int] = _ask_field(
"Do you want to enable dynamic shape tracing? [yes/NO]: " , _convert_yes_no_to_bool , default=lowerCamelCase__ , error_message="Please enter yes or no." , )
lowercase__ : int = "Which EC2 instance type you want to use for your training?"
if distributed_type != SageMakerDistributedType.NO:
lowercase__ : str = _ask_options(
lowerCamelCase__ , lowerCamelCase__ , lambda lowerCamelCase__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(lowerCamelCase__ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
lowercase__ : Union[str, Any] = _ask_field(lowerCamelCase__ , lambda lowerCamelCase__ : str(lowerCamelCase__ ).lower() , default="ml.p3.2xlarge" )
lowercase__ : Dict = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
lowercase__ : Optional[int] = _ask_field(
"How many machines do you want use? [1]: " , lowerCamelCase__ , default=1 , )
lowercase__ : Union[str, Any] = _ask_options(
"Do you wish to use FP16 or BF16 (mixed precision)?" , ["no", "fp16", "bf16", "fp8"] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." )
return SageMakerConfig(
image_uri=lowerCamelCase__ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=lowerCamelCase__ , use_cpu=lowerCamelCase__ , dynamo_config=lowerCamelCase__ , eca_instance_type=lowerCamelCase__ , profile=lowerCamelCase__ , region=lowerCamelCase__ , iam_role_name=lowerCamelCase__ , mixed_precision=lowerCamelCase__ , num_machines=lowerCamelCase__ , sagemaker_inputs_file=lowerCamelCase__ , sagemaker_metrics_file=lowerCamelCase__ , )
| 121
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '''▁'''
lowerCAmelCase__ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
lowerCAmelCase__ = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
lowerCAmelCase__ = {'''vinai/bartpho-syllable''': 1_0_2_4}
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int]="<s>" , SCREAMING_SNAKE_CASE : Optional[int]="</s>" , SCREAMING_SNAKE_CASE : str="</s>" , SCREAMING_SNAKE_CASE : List[str]="<s>" , SCREAMING_SNAKE_CASE : Union[str, Any]="<unk>" , SCREAMING_SNAKE_CASE : Tuple="<pad>" , SCREAMING_SNAKE_CASE : List[str]="<mask>" , SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE : int , ):
# Mask token behave like a normal word, i.e. include the space before it
lowercase__ : Dict = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token
lowercase__ : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , )
lowercase__ : Dict = vocab_file
lowercase__ : Union[str, Any] = monolingual_vocab_file
lowercase__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
lowercase__ : Any = {}
lowercase__ : int = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids:
lowercase__ : Dict = cnt
cnt += 1
with open(SCREAMING_SNAKE_CASE , "r" , encoding="utf-8" ) as f:
for line in f.readlines():
lowercase__ : int = line.strip().split()[0]
lowercase__ : List[str] = len(self.fairseq_tokens_to_ids )
if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids:
lowercase__ : Optional[Any] = len(self.fairseq_tokens_to_ids )
lowercase__ : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : int ):
lowercase__ : Dict = self.__dict__.copy()
lowercase__ : Union[str, Any] = None
lowercase__ : Dict = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] ):
lowercase__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowercase__ : Dict = {}
lowercase__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ : List[Any] = [self.cls_token_id]
lowercase__ : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
def snake_case ( self : str , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ):
lowercase__ : Tuple = [self.sep_token_id]
lowercase__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def snake_case ( self : Optional[int] ):
return len(self.fairseq_ids_to_tokens )
def snake_case ( self : List[Any] ):
lowercase__ : Any = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : str ):
return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : int ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Any ):
return self.fairseq_ids_to_tokens[index]
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
lowercase__ : str = "".join(SCREAMING_SNAKE_CASE ).replace(SCREAMING_SNAKE_CASE , " " ).strip()
return out_string
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] = None ):
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ : str = os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : List[str] = os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE , "wb" ) as fi:
lowercase__ : str = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
SCREAMING_SNAKE_CASE ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f"""{str(SCREAMING_SNAKE_CASE )} \n""" )
return out_vocab_file, out_monolingual_vocab_file
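
# Hedged, self-contained sketch (not from the original) of the RoBERTa-style
# special-token layout built by the tokenizer above; token-id values are
# illustrative only.
if __name__ == "__main__":
    cls_id, sep_id = 0, 2
    tokens_a, tokens_b = [10, 11], [20]
    single = [cls_id] + tokens_a + [sep_id]
    pair = [cls_id] + tokens_a + [sep_id, sep_id] + tokens_b + [sep_id]
    assert single == [0, 10, 11, 2]
    assert pair == [0, 10, 11, 2, 2, 20, 2]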
| 121
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase : Union[str, Any] = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 2
|
'''simple docstring'''
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """simple docstring"""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    """simple docstring"""
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    """simple docstring"""
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
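
# Hedged note (not in the original): for n = 4, k = 2 the program prints all
# C(4, 2) = 6 combinations:
# 1 2
# 1 3
# 1 4
# 2 3
# 2 4
# 3 4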
| 2
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A : Optional[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
A : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
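
# Hedged, dependency-free sketch (not transformers' actual _LazyModule) of the
# lazy-import idea used above: attribute access on the module proxy triggers
# the real import only on first use.
import importlib
import types


class _LazyProxy(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        for submodule, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)


_proxy = _LazyProxy("demo", {"json": ["dumps"]})
assert _proxy.dumps({"ok": True}) == '{"ok": true}'  # json is imported lazily here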
| 33
|
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __A( a ):
@slow
@require_torch
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
__a = BertTokenizer.from_pretrained('''bert-base-uncased''' )
__a = bertabert.config.encoder.vocab_size
__a = tokenizer.sep_token_id
__a = tokenizer.cls_token_id
__a = 128
__a = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
__a = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
__a = train_dataset.select(range(32 ) )
__a = val_dataset.select(range(16 ) )
__a = 4
        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {"accuracy": accuracy}
# map train dataset
__a = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
__a = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
__a = self.get_auto_remove_tmp_dir()
__a = SeqaSeqTrainingArguments(
output_dir=_snake_case , per_device_train_batch_size=_snake_case , per_device_eval_batch_size=_snake_case , predict_with_generate=_snake_case , evaluation_strategy='''steps''' , do_train=_snake_case , do_eval=_snake_case , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__a = SeqaSeqTrainer(
model=_snake_case , args=_snake_case , compute_metrics=_compute_metrics , train_dataset=_snake_case , eval_dataset=_snake_case , tokenizer=_snake_case , )
# start training
trainer.train()
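
# Hedged, self-contained sketch (not part of the test above) of the
# label-masking convention used in `_map_to_encoder_decoder_inputs`: pad
# positions are replaced with -100 so the cross-entropy loss ignores them.
if __name__ == "__main__":
    pad_token_id = 0
    labels = [[5, 6, pad_token_id]]
    masked = [[-100 if token == pad_token_id else token for token in seq] for seq in labels]
    assert masked == [[5, 6, -100]]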
| 33
| 1
|
'''simple docstring'''
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    """simple docstring"""
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data) -> bytes:
    """simple docstring"""
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(decoded_data)
if __name__ == "__main__":
import doctest
doctest.testmod()
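# Round-trip example (illustrative; not part of the original module). The expected
# values match the standard library's base64.b64encode / b64decode output.
# >>> base64_encode(b"Hello World!")
# b'SGVsbG8gV29ybGQh'
# >>> base64_decode("SGVsbG8gV29ybGQh")
# b'Hello World!'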
| 28
|
'''simple docstring'''
def selection_sort(collection: list) -> list:
    """Pure implementation of the selection sort algorithm in Python."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
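# Quick sanity check (illustrative; not part of the original file): the sort is done
# in place and the same list object is returned.
# >>> selection_sort([5, 2, 9, 1])
# [1, 2, 5, 9]
# >>> selection_sort([])
# []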
| 70
| 0
|
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
                {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444},
                {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.021},
                {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167},
                {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132},
                {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053},
                {'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9967},
                {'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.993},
                {'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9909},
                {'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9879},
                {'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9834},
                {'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9716},
                {'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9612},
                {'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9599},
                {'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9552},
                {'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9532},
                {'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9516},
                {'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9499},
                {'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9483},
                {'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9464},
                {'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.943},
                {'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.943},
                {'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9408},
                {'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9335},
                {'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9326},
                {'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9262},
                {'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8999},
                {'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8986},
                {'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8984},
                {'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8873},
                {'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8871},
            ],
        )
# fmt: on
@require_torch
@slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
                {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444},
                {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0210},
                {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167},
                {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132},
                {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053},
            ],
        )
| 365
|
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """Calculate the resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C))."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
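# Worked example (illustrative; not part of the original file): with L = 10 mH and
# C = 100 nF, L * C = 1e-9, so f = 1 / (2 * pi * sqrt(1e-9)), roughly 5032.9 Hz.
# >>> resonant_frequency(inductance=10e-3, capacitance=100e-9)[1]  # doctest: +ELLIPSIS
# 5032.92...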
| 154
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-12, drop_path_rate=0.0, image_size=224, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
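# Minimal usage sketch (illustrative; not part of the original file). The defaults
# reproduce the tiny-variant layout; `out_features` selects backbone stages by name.
# config = ConvNextV2Config(out_features=["stage2", "stage4"])
# assert config.depths == [3, 3, 9, 3]
# assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]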
| 340
|
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
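# Example invocation (illustrative; the flag names come from the parser above, and
# the script filename is an arbitrary assumption):
#
#   python convert_unclip_txt2img_to_image_variation.py \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha \
#       --dump_path ./karlo-image-variations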
| 104
| 0
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 180
|
from math import isqrt
def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F'{solution() = }')
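# Why `prime_candidate += 6 * cube_index` visits exactly the cube differences
# (illustrative note, not from the original file): (n + 1)**3 - n**3 == 3*n*n + 3*n + 1,
# giving 7, 19, 37, 61, ..., and consecutive terms differ by exactly 6*n.
# >>> [(n + 1) ** 3 - n ** 3 for n in range(1, 5)]
# [7, 19, 37, 61]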
| 180
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
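# Quick illustration (not part of the original file): sieve() lazily yields the primes
# in order, and solution() checks only every other prime against odd n.
# >>> from itertools import islice
# >>> list(islice(sieve(), 6))
# [2, 3, 5, 7, 11, 13]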
| 86
|
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, size=None, do_normalize=True, do_convert_rgb=True, patch_size=None, ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11, reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.", )
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        # flattened patch length = patch area * channels, plus 2 slots for row/column indices
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11, reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.", )
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (RGBA inputs are converted to 3-channel RGB)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
| 270
| 0
|
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}." )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f'resnet{"-".join(name.split("resnet"))}'
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add model", use_temp_dir=True, )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add image processor", use_temp_dir=True, )

        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic" ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck" ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic" ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck" ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck" ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck" ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 350
|
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download a dataset using the datasets package and save it to the format expected by finetune.py"""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split

        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
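# Example invocation via the fire CLI (illustrative; assumes the file is saved as
# download_wmt.py, and writes {train,val,test}.source / .target under save_dir):
#
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir ./wmt16-ro-en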
| 112
| 0
|
'''simple docstring'''
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
lowercase_ = """Usage of script: script_name <size_of_canvas:int>"""
lowercase_ = [0] * 100 + [1] * 10
random.shuffle(choice)
def lowerCamelCase ( __lowerCamelCase : int ) ->list[list[bool]]:
_SCREAMING_SNAKE_CASE = [[False for i in range(__lowerCamelCase )] for j in range(__lowerCamelCase )]
return canvas
def lowerCamelCase ( __lowerCamelCase : list[list[bool]] ) ->None:
for i, row in enumerate(__lowerCamelCase ):
for j, _ in enumerate(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE = bool(random.getrandbits(1 ) )
def lowerCamelCase ( __lowerCamelCase : list[list[bool]] ) ->list[list[bool]]:
_SCREAMING_SNAKE_CASE = np.array(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(__lowerCamelCase ):
for c, pt in enumerate(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE = __judge_point(
__lowerCamelCase , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
_SCREAMING_SNAKE_CASE = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
_SCREAMING_SNAKE_CASE = current_canvas.tolist()
return return_canvas
def lowerCamelCase ( __lowerCamelCase : bool , __lowerCamelCase : list[list[bool]] ) ->bool:
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
_SCREAMING_SNAKE_CASE = pt
if pt:
if alive < 2:
_SCREAMING_SNAKE_CASE = False
elif alive == 2 or alive == 3:
_SCREAMING_SNAKE_CASE = True
elif alive > 3:
_SCREAMING_SNAKE_CASE = False
else:
if alive == 3:
_SCREAMING_SNAKE_CASE = True
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
lowercase_ = int(sys.argv[1])
# main working structure of this module.
lowercase_ = create_canvas(canvas_size)
seed(c)
lowercase_ , lowercase_ = plt.subplots()
fig.show()
lowercase_ = ListedColormap(["""w""", """k"""])
try:
while True:
lowercase_ = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
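# Worked example of the rules (illustrative; not from the original file). The 3x3
# neighbourhood passed to __judge_point includes the focus cell itself, which the
# function subtracts before applying Conway's rules.
# >>> __judge_point(True, [[True, False, False], [False, True, False], [False, True, False]])
# True
# >>> __judge_point(False, [[True, True, False], [False, False, False], [False, False, False]])
# False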
| 58
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
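# Sanity check of the rounding above (illustrative; not part of the original file):
# with the default scale_factor=8 the divisor is 64, so 768 maps exactly to 96 while
# 500 is rounded up to the next multiple, 64.
# >>> downscale_height_and_width(768, 768)
# (96, 96)
# >>> downscale_height_and_width(500, 500)
# (64, 64)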
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds, negative_image_embeds, height=512, width=512, num_inference_steps=100, guidance_scale=4.0, num_images_per_prompt=1, generator=None, latents=None, output_type="pil", return_dict=True, ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler, )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 58
| 1
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )


@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowercase_ )
# Set seed
set_seed(training_args.seed )
try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 34
|
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
| 34
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=640, num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.02, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride), )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride), )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_headmasking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
import unittest
from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine") -> torch.Tensor:
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
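# A quick sanity check (my own illustration, not part of the original file):
# the betas of the cosine schedule are positive and capped at max_beta.
#
#     betas = betas_for_alpha_bar(1000)
#     assert betas.shape == (1000,)
#     assert float(betas.max()) <= 0.999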
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps):
        """Constructs the noise schedule of Karras et al. (2022)."""
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None

    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
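# A minimal usage sketch (my own illustration; `model` stands in for any
# noise-prediction UNet and is not defined here):
#
#     scheduler = HeunDiscreteScheduler(beta_schedule="scaled_linear")
#     scheduler.set_timesteps(num_inference_steps=25, device="cpu")
#     sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = model(model_input, t)  # hypothetical model call
#         sample = scheduler.step(noise_pred, t, sample).prev_sample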
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
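# A minimal usage sketch (my own example, not from the original file): with
# 3 coins at the root and two empty children, one coin must travel along each
# edge, so the answer is 2.
#
#     example_root = TreeNode(3, TreeNode(0), TreeNode(0))
#     assert distribute_coins(example_root) == 2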
def longest_common_subsequence(x: str, y: str):
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"

    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)

    import doctest

    doctest.testmod()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
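# A minimal usage sketch (assuming this file sits at its upstream location,
# transformers/models/wav2vec2_phoneme/__init__.py): thanks to _LazyModule,
# the tokenizer module is only imported when the attribute is first accessed.
#
#     from transformers.models.wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer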
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
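# A minimal usage sketch (my own example, not part of the original file):
# build a config for a toy environment and read back a mapped attribute.
#
#     config = DecisionTransformerConfig(state_dim=11, act_dim=3, n_layer=2)
#     assert config.num_hidden_layers == config.n_layer == 2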
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
"""simple docstring"""
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)

        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)

        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)

    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
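# A minimal usage sketch (values are my own illustration, not from the
# original file): three data points with columns (price, quality), where a
# column weight of 0 means "lower is better" and 1 means "higher is better".
if __name__ == "__main__":
    vehicles = [[20, 60], [23, 90], [22, 50]]
    weights = [0, 1]
    # each inner list gains a final combined score as its last element
    print(procentual_proximity(vehicles, weights))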
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
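# A minimal usage sketch (my own illustration; `unet` and `scheduler` stand
# in for a trained UNet2DModel and any scheduler convertible to DDIM):
#
#     pipe = DDIMPipeline(unet=unet, scheduler=scheduler)
#     images = pipe(batch_size=4, num_inference_steps=50, eta=0.0).images
#     images[0].save("sample.png")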
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name,
        config,
        version,
        cache_dir=None,
        use_local_dummy_data=False,
        load_existing_dummy_data=True,
        download_callbacks=None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
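# A minimal usage sketch (hypothetical arguments, for illustration only):
# the mock manager resolves URLs against a local dummy_data.zip instead of
# performing real downloads.
#
#     dl_manager = MockDownloadManager("squad", config=None, version=Version("1.0.0"))
#     local_file = dl_manager.download_and_extract("https://example.com/train.json")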
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil_four_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
F'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
'''simple docstring'''
def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer or zero")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
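# Worked examples (my own illustration): the helper prefixes a sign bit, so
# e.g. -5 in a 4-bit two's complement representation becomes 0b1011.
#
#     assert twos_complement(0) == "0b0"
#     assert twos_complement(-5) == "0b1011"
#     assert twos_complement(-17) == "0b101111"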
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
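# Hedged sketch (added) of what the _LazyModule indirection above buys: attribute
# access triggers the real import, so importing the package stays cheap. This is a
# simplified stand-in, not the actual transformers internals:
#
# import importlib, types
#
# class LazyModuleSketch(types.ModuleType):
#     def __init__(self, name, import_structure):
#         super().__init__(name)
#         self._import_structure = import_structure
#     def __getattr__(self, attr):
#         for submodule, names in self._import_structure.items():
#             if attr in names:
#                 module = importlib.import_module(f"{self.__name__}.{submodule}")
#                 return getattr(module, attr)
#         raise AttributeError(attr)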
| 350
|
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_aa=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_aa and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
_lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
    parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is an mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
_lowerCamelCase : List[str] = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_50
)
model.save_pretrained(args.pytorch_dump_folder_path)
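# Hedged CLI sketch (added): the script filename, checkpoint path, and output
# directory below are placeholders.
#
#   python convert_mbart_checkpoint.py /path/to/model.pt ./mbart-hf \
#       --hf_config facebook/mbart-large-cc25 --finetuned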
| 130
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCamelCase__ : Tuple = {
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    '''simple docstring'''

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        """simple docstring"""
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        """simple docstring"""
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        """simple docstring"""
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-4
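# Hedged usage sketch (added): generating dummy ONNX export inputs with this
# config; the checkpoint id is illustrative.
#
# from transformers import AutoTokenizer
# tok = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
# cfg = MarianConfig.from_pretrained("Helsinki-NLP/opus-mt-en-de")
# onnx_config = MarianOnnxConfig(cfg, task="seq2seq-lm")
# dummy = onnx_config.generate_dummy_inputs(tok, framework=TensorType.PYTORCH)
# print(sorted(dummy.keys()))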
| 112
|
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class Node:
    '''simple docstring'''

    def __init__(self, value: int | None = None):
        """simple docstring"""
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self):
        """simple docstring"""
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)
class BinarySearchTree:
    '''simple docstring'''

    def __init__(self, root: Node | None = None):
        """simple docstring"""
        self.root = root

    def __str__(self):
        """simple docstring"""
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None):
        """simple docstring"""
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node):
        """simple docstring"""
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self):
        """simple docstring"""
        return self.root is None

    def __insert(self, value):
        """simple docstring"""
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values):
        """simple docstring"""
        for value in values:
            self.__insert(value)

    def search(self, value):
        """simple docstring"""
        if self.empty():
            raise IndexError("Warning: Tree is empty! Please insert values before searching.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None):
        """simple docstring"""
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None):
        """simple docstring"""
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value: int):
        """simple docstring"""
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None):
        """simple docstring"""
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None):
        """simple docstring"""
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None):
        """simple docstring"""
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node):
        """simple docstring"""
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder(curr_node: Node | None):
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree_example():
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
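# Hedged usage sketch (added): exercising find_kth_smallest and postorder on the
# same test values used above.
#
# t = BinarySearchTree()
# t.insert(8, 3, 6, 1, 10, 14, 13, 4, 7)
# print(t.find_kth_smallest(3, t.root))        # -> 4 (third smallest value)
# print([n.value for n in postorder(t.root)])  # leaves first, root last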
| 112
| 1
|
"""simple docstring"""
values = {
0: """0""",
1: """1""",
2: """2""",
3: """3""",
4: """4""",
5: """5""",
6: """6""",
7: """7""",
8: """8""",
9: """9""",
10: """a""",
11: """b""",
12: """c""",
13: """d""",
14: """e""",
15: """f""",
}
def decimal_to_hexadecimal(decimal) -> str:
    """simple docstring"""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
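# Quick worked examples (added): 255 = 15*16 + 15 -> '0xff'; -42 -> '-0x2a'.
# assert decimal_to_hexadecimal(255) == "0xff"
# assert decimal_to_hexadecimal(-42) == "-0x2a"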
| 365
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = 'dandelin/vilt-b32-finetuned-vqa'
    description = (
        'This is a tool that answers a question about an image. It takes an input named `image` which should be the '
        'image containing the information, as well as a `question` which should be the question in English. It '
        'returns a text that is the answer to the question.'
    )
    name = 'image_qa'
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ['image', 'text']
    outputs = ['text']

    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        requires_backends(self, ['''vision'''])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        '''simple docstring'''
        return self.pre_processor(image, question, return_tensors='''pt''')

    def forward(self, inputs):
        '''simple docstring'''
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        '''simple docstring'''
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
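# Hedged usage sketch (added): 'image.png' and the question are placeholders, and
# the call signature assumes the standard Tool __call__ forwarding.
#
# from PIL import Image
# tool = ImageQuestionAnsweringTool()
# answer = tool(image=Image.open("image.png"), question="What is shown here?")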
| 100
| 0
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 33
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 33
| 1
|
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''')


@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"""{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"""

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"""{other} (type {type(other)}) cannot be compared to version.""")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"""Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.""")
    return tuple(int(v) for v in [res.group('major'), res.group('minor'), res.group('patch')])


def _version_tuple_to_str(version_tuple):
    return ".".join(str(v) for v in version_tuple)
| 107
|
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = '''https://api.github.com'''

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '''/user'''

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('''USER_TOKEN''', '''''')


def fetch_github_info(auth_token) -> dict[Any, Any]:
    headers = {
        'Authorization': f"""token {auth_token}""",
        'Accept': 'application/vnd.github.v3+json',
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f"""{key}: {value}""")
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
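# Hedged usage note (added): export a personal access token first, then run the
# script (filename here is a placeholder), e.g.
#   USER_TOKEN=ghp_xxx python fetch_github_info.py
# It prints the authenticated user's profile fields.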
| 107
| 1
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    '''simple docstring'''

    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="""text-classification""", model="""hf-internal-testing/tiny-random-distilbert""", framework="""pt""")

        outputs = text_classifier("""This is great !""")
        self.assertEqual(nested_simplify(outputs), [{"""label""": """LABEL_0""", """score""": 0.504}])

        outputs = text_classifier("""This is great !""", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}])

        outputs = text_classifier(["""This is great !""", """This is bad"""], top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
            ], )

        outputs = text_classifier("""This is great !""", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"""label""": """LABEL_0""", """score""": 0.504}])

        # Legacy behavior
        outputs = text_classifier("""This is great !""", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"""label""": """LABEL_0""", """score""": 0.504}])

        outputs = text_classifier("""This is great !""", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]])

        outputs = text_classifier(["""This is great !""", """Something else"""], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
            ], )

        outputs = text_classifier(["""This is great !""", """Something else"""], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs), [
                {"""label""": """LABEL_0""", """score""": 0.504},
                {"""label""": """LABEL_0""", """score""": 0.504},
            ], )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="""text-classification""", model="""hf-internal-testing/tiny-random-distilbert""", framework="""pt""", device=torch.device("""cpu"""), )

        outputs = text_classifier("""This is great !""")
        self.assertEqual(nested_simplify(outputs), [{"""label""": """LABEL_0""", """score""": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="""text-classification""", model="""hf-internal-testing/tiny-random-distilbert""", framework="""tf""")

        outputs = text_classifier("""This is great !""")
        self.assertEqual(nested_simplify(outputs), [{"""label""": """LABEL_0""", """score""": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("""text-classification""")

        outputs = text_classifier("""This is great !""")
        self.assertEqual(nested_simplify(outputs), [{"""label""": """POSITIVE""", """score""": 1.0}])
        outputs = text_classifier("""This is bad !""")
        self.assertEqual(nested_simplify(outputs), [{"""label""": """NEGATIVE""", """score""": 1.0}])
        outputs = text_classifier("""Birds are a type of animal""")
        self.assertEqual(nested_simplify(outputs), [{"""label""": """POSITIVE""", """score""": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("""text-classification""", framework="""tf""")

        outputs = text_classifier("""This is great !""")
        self.assertEqual(nested_simplify(outputs), [{"""label""": """POSITIVE""", """score""": 1.0}])
        outputs = text_classifier("""This is bad !""")
        self.assertEqual(nested_simplify(outputs), [{"""label""": """NEGATIVE""", """score""": 1.0}])
        outputs = text_classifier("""Birds are a type of animal""")
        self.assertEqual(nested_simplify(outputs), [{"""label""": """POSITIVE""", """score""": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = """HuggingFace is in"""
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"""label""": ANY(str), """score""": ANY(float)}])
        self.assertTrue(outputs[0]["""label"""] in model.config.id2label.values())

        valid_inputs = ["""HuggingFace is in """, """Paris is in France"""]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs), [{"""label""": ANY(str), """score""": ANY(float)}, {"""label""": ANY(str), """score""": ANY(float)}], )
        self.assertTrue(outputs[0]["""label"""] in model.config.id2label.values())
        self.assertTrue(outputs[1]["""label"""] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs), [[{"""label""": ANY(str), """score""": ANY(float)}] * N, [{"""label""": ANY(str), """score""": ANY(float)}] * N], )

        valid_inputs = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs), {"""label""": ANY(str), """score""": ANY(float)}, )
        self.assertTrue(outputs["""label"""] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["""HuggingFace is in """, """Paris is in France"""]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]])
        self.assertEqual(
            nested_simplify(outputs), [{"""label""": ANY(str), """score""": ANY(float)}], )
        self.assertTrue(outputs[0]["""label"""] in model.config.id2label.values())
| 58
|
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    '''simple docstring'''
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = F'{src_lang}-{tgt_lang}'
A__ = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
    readme = A__  # the model-card text assembled on the line above (source variable name kept)
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(F'Generating {path}')
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / """model_cards"""

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("""-""")
    model_card_dir = model_cards_dir / """facebook""" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 68
| 0
|
"""simple docstring"""
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """simple docstring"""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
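# Quick numeric check (added): at x = mu the density is 1 / sqrt(2*pi*sigma^2),
# so gaussian(0.0) ~ 0.3989 for the standard normal.
# assert abs(gaussian(0.0) - 0.3989422804014327) < 1e-12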
| 289
|
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt', )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """simple docstring"""
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
                metric.add_batch(
                    predictions=predictions, references=references, )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
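# Hedged sketch (added) of what `find_executable_batch_size` does: it retries the
# wrapped function with a smaller batch size whenever an out-of-memory error
# escapes. A simplified stand-in (the real accelerate helper has more checks):
#
# def find_executable_batch_size_sketch(starting_batch_size=128):
#     def decorator(fn):
#         def wrapper():
#             bs = starting_batch_size
#             while bs > 0:
#                 try:
#                     return fn(bs)
#                 except RuntimeError as e:
#                     if "out of memory" not in str(e):
#                         raise
#                     bs //= 2
#         return wrapper
#     return decorator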
| 289
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 17
|
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    '''simple docstring'''
    headers = {'Content-Type': 'application/json'}
    response = requests.post(slack_url, json={'text': message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            'Request to slack returned an error '
            f'''{response.status_code}, the response is:\n{response.text}'''
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 222
| 0
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Graph:
    '''simple docstring'''

    def __init__(self, num_of_nodes):
        """simple docstring"""
        self.m_num_of_nodes = num_of_nodes
        self.m_edges = []
        self.m_component = {}

    def add_edge(self, u_node, v_node, weight):
        """simple docstring"""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node):
        """simple docstring"""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node):
        """simple docstring"""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size, u_node, v_node):
        """simple docstring"""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)

        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self):
        """simple docstring"""
        component_size = []
        mst_weight = 0

        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge

                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f'Added edge [{u} - {v}]\nAdded weight: {w}\n')
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f'The total weight of the minimal spanning tree is: {mst_weight}')
def a__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
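# Hedged usage sketch (added): a small 4-node example; edge weights are illustrative.
#
# g = Graph(4)
# g.add_edge(0, 1, 10)
# g.add_edge(0, 2, 6)
# g.add_edge(0, 3, 5)
# g.add_edge(1, 3, 15)
# g.add_edge(2, 3, 4)
# g.boruvka()  # prints the chosen edges and a total MST weight of 19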
| 168
|
"""simple docstring"""
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
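# Quick worked examples (added), using 0b1101 = 13:
# assert set_bit(13, 1) == 15       # 0b1111
# assert clear_bit(13, 2) == 9      # 0b1001
# assert flip_bit(13, 0) == 12      # 0b1100
# assert is_bit_set(13, 3) is True
# assert get_bit(13, 1) == 0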
| 168
| 1
|
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time, burst_time, no_of_processes) -> list[int]:
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Initialize remaining_time to waiting_time.

    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []

    completed = 0
    total_time = 0

    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.

    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time, no_of_processes, waiting_time) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print('''[TEST CASE 01]''')
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F'{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'
F'{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'
)
print(F'\nAverage waiting time = {mean(waiting_time):.5f}')
print(F'Average turnaround time = {mean(turn_around_time):.5f}')
| 41
|
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, rotary_dim=4, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = GPTJConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, )
        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1))
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids, )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids, )
        outputs = model(input_ids)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f'''Max diff is {diff}''')
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))], axis=-1, )
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1))
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids, )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids, )
        outputs = model(input_ids, attention_mask=attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f'''Max diff is {diff}''')
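# Editor's note: the two checks above verify that decoding one token at a time
# with init_cache/past_key_values produces (to within ~1e-3) the same logits as
# a single full forward pass -- the core invariant of autoregressive caching.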
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)
    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)
    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask)
    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)
        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate)
        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id).sequences
        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)
        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]
        self.assertListEqual(output_string, expected_string)
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)
                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                # make sure weights are tied in PyTorch
                pt_model.tie_weights()
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)
                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 158
| 0
|
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square_using_dp_array(
        row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy, so the row below is preserved for the next pass
        next_row = current_row[:]
    return largest_square_area
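# Editor's note: all four variants return the side length of the largest
# all-ones square, e.g. a 2x2 matrix of ones yields 2 (see the driver below).
# The bottom-up versions run in O(rows * cols) time, versus exponential time
# for the plain (un-memoized) recursion.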
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 366
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"
    def __init__(self, vocab_size=42384, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, scale_embedding=True, use_cache=True, layerdrop=0.0, activation_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
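# Editor's sketch of typical usage (save_pretrained is inherited from
# PretrainedConfig; the path below is a hypothetical example):
#     config = BioGptConfig()                 # microsoft/biogpt defaults
#     config.save_pretrained("./biogpt-cfg")  # writes config.json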
| 232
| 0
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")
    parser.add_argument(
        "--config_file", default=None, help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ), )
    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"
    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
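# Editor's note on invocation (assuming a standard accelerate install):
#     accelerate test                        # probes the default saved config
#     accelerate test --config_file my.yaml  # probes a specific config file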
if __name__ == "__main__":
main()
| 34
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)
    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)
    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)
    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)
    @slow
    def test_run_ner(self):
        # with so little data, distributed training needs more epochs to reach the single-GPU score
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)
    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
| 34
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
            NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 351
|
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(search_prob, find_max=True, max_x=math.inf, min_x=-math.inf, max_y=math.inf, min_y=-math.inf, visualization=False, start_temperate=100, rate_of_decrease=0.01, threshold_temp=1, ):
    """Run simulated annealing from search_prob and return the best state found."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
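# Acceptance-probability example (editor's note): at temperature T=100, a move
# that worsens the score by 5 (change = -5) is still accepted with probability
# e^(-5/100) ~= 0.95; at T=1 the same move survives only with e^(-5) ~= 0.007,
# which is why the search settles down as the temperature decays.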
if __name__ == "__main__":
    def test_fa(x, y):
        return (x**2) + (y**2)
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )
    def test_fa(x, y):
        return (3 * x**2) - (6 * y)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )
| 244
| 0
|
def all_chars_unique(input_str: str) -> bool:
    # name chosen by the editor; the original identifier was garbled
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
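# Examples (editor's note): all_chars_unique("abc") -> True,
# all_chars_unique("aba") -> False. Python ints are arbitrary precision, so the
# bitmap approach works for any Unicode code point, not just ASCII.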
if __name__ == "__main__":
import doctest
doctest.testmod()
| 222
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 222
| 1
|
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None, metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        }, )
    model_type: Optional[str] = field(
        default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
@dataclass
class DataTrainingArguments:
    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."})
    train_data_files: Optional[str] = field(
        default=None, metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        }, )
    eval_data_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, )
    train_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input train ref data file for whole word mask in Chinese."}, )
    eval_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."}, )
    line_by_line: bool = field(
        default=False, metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."}, )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."})
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"})
    plm_probability: float = field(
        default=1 / 6, metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        }, )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."})
    block_size: int = field(
        default=-1, metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def get_dataset(args: DataTrainingArguments, tokenizer: PreTrainedTokenizer, evaluate: bool = False, cache_dir: Optional[str] = None, ):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path, )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, overwrite_cache=args.overwrite_cache, cache_dir=cache_dir, )
    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
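# Editor's note on the selection logic above: --eval_data_file is used for
# evaluation, a glob of files (--train_data_files) is concatenated via
# ConcatDataset for training, otherwise the single --train_data_file is used,
# each optionally paired with a *_ref_file for Chinese whole-word masking.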
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument.")
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome.")
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
    # Set seed
    set_seed(training_args.seed)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it, and load it from here, using --tokenizer_name")
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)
    model.resize_token_embeddings(len(tokenizer))
    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            " --mlm flag (masked language modeling).")
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)
    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer, plm_probability=data_args.plm_probability, max_span_length=data_args.max_span_length, )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability)
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, prediction_loss_only=True, )
    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}
        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))
        results.update(result)
    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 239
|
"""simple docstring"""
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
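# Editor's mapping note: the s3prl "Downstream" head weights are copied
# tensor-by-tensor into the matching HF head modules (projector / classifier /
# tdnn / objective), while the backbone itself is re-instantiated from
# `base_model_name` via from_pretrained.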
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 239
| 1
|
'''simple docstring'''
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process (shortest remaining time first)."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False
    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
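# Worked example (editor's note): with arrivals [0, 1] and bursts [4, 2], P1
# runs for 1ms, is preempted by the shorter P2 (which finishes at t=3), then P1
# resumes and finishes at t=6 -- giving waiting times [2, 0].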
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Calculate the turnaround time of each process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting and turnaround times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))
    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
    fcfs = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"Process",
"BurstTime",
"ArrivalTime",
"WaitingTime",
"TurnAroundTime",
],
)
# Printing the dataFrame
pd.set_option("display.max_rows", fcfs.shape[0] + 1)
print(fcfs)
| 251
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
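# A minimal usage sketch (editor's note; all arguments default to the
# bert-base-uncased values):
#     config = BertConfig()                     # 12 layers, hidden size 768
#     config = BertConfig(num_hidden_layers=6)  # a smaller hypothetical variant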
| 179
| 0
|
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = WavaVecaForSequenceClassification.from_pretrained(lowercase , config=lowercase )
SCREAMING_SNAKE_CASE : Dict = downstream_dict["projector.weight"]
SCREAMING_SNAKE_CASE : str = downstream_dict["projector.bias"]
SCREAMING_SNAKE_CASE : List[Any] = downstream_dict["model.post_net.linear.weight"]
SCREAMING_SNAKE_CASE : Any = downstream_dict["model.post_net.linear.bias"]
return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    """simple docstring"""
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    """simple docstring"""
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """simple docstring"""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
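# Typical invocation (file paths and model name below are illustrative, not
# taken from this repository):
#   python convert_s3prl_checkpoint.py --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json --checkpoint_path ./s3prl.ckpt --model_dump_path ./dump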
| 319
|
def or_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """simple docstring"""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
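# Truth table implemented above: OR is 0 only when both inputs are 0.
#   or_gate(0, 0) -> 0, or_gate(0, 1) -> 1, or_gate(1, 0) -> 1, or_gate(1, 1) -> 1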
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 319
| 1
|
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        """simple docstring"""

        # ctypes layout of the Win32 CONSOLE_CURSOR_INFO struct
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()
def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()
@contextmanager
def hidden_cursor():
try:
hide_cursor()
yield
finally:
show_cursor()
| 55
|
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
'''simple docstring'''
UpperCAmelCase = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
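# Quick examples (values chosen for illustration):
#   electric_power(voltage=0, current=2, power=5) -> result(name='voltage', value=2.5)
#   electric_power(voltage=2, current=2, power=0) -> result(name='power', value=4.0)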
if __name__ == "__main__":
import doctest
doctest.testmod()
| 273
| 0
|
"""simple docstring"""
# fmt: off
MORSE_CODE_DICT = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
| 369
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
        if determinant == 0:
            raise ValueError('This matrix has no inverse.')
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
elif (
        len(matrix) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creating cofactor matrix
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)
        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
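# Sanity check for the 2x2 branch: for [[2.0, 5.0], [1.0, 3.0]] the determinant
# is 2*3 - 5*1 = 1, so inverse_of_matrix returns [[3.0, -5.0], [-1.0, 2.0]].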
| 309
| 0
|
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
return data
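# Note: this is the random-transposition variant; the classic Fisher-Yates walks
# indices from the end and swaps each element only with a not-yet-visited
# position, which guarantees a uniform distribution over permutations.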
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 41
|
def circle_sort(collection: list) -> list:
    """simple docstring"""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
return collection
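# e.g. circle_sort([5, 2, 9, 1]) -> [1, 2, 5, 9]; the outer loop repeats full
# recursive passes until one of them reports no swaps.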
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(circle_sort(unsorted))
| 39
| 0
|
"""simple docstring"""
def sum_of_divisors(input_num: int) -> int:
    '''simple docstring'''
    if not isinstance(input_num, int):
        raise ValueError('Input must be an integer')
    if input_num <= 0:
        raise ValueError('Input must be positive')
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
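# e.g. sum_of_divisors(6) == 1 + 2 + 3 == 6, so 6 is a perfect number.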
if __name__ == "__main__":
import doctest
doctest.testmod()
| 353
|
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    '''simple docstring'''
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty: str) -> str:
    '''simple docstring'''
    dirty = ''.join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ''
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean
def generate_table(key: str) -> list[str]:
    '''simple docstring'''
    alphabet = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table
def encode(plaintext: str, key: str) -> str:
    '''simple docstring'''
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext
def decode(ciphertext: str, key: str) -> str:
    '''simple docstring'''
    table = generate_table(key)
    plaintext = ''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
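# Round-trip sketch (key and message are illustrative):
#   decode(encode("Hello World", "playfair example"), "playfair example")
# returns the prepared plaintext "HELXLOWORLDX" (uppercased, doubled letters
# split with X, padded to even length).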
| 95
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
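# _LazyModule replaces this module in sys.modules, so the torch-backed classes
# listed in _import_structure are only imported the first time an attribute is
# actually accessed; `import transformers.models.altclip` itself stays cheap.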
| 344
|
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    """simple docstring"""

    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}")
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}")
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)
    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)
    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))
        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")
    def run_common_tests(self):
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
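# Minimal usage sketch (config class and extra kwargs are illustrative):
#   tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#   tester.run_common_tests()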
| 344
| 1
|
def solution() -> str:
    """simple docstring"""
    total = 0
    for i in range(1, 1_001):
        total += i**i
    return str(total)[-10:]
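# Direct computation gives 9110846700 as the last ten digits of the series
# 1**1 + 2**2 + ... + 1000**1000 (Project Euler problem 48).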
if __name__ == "__main__":
print(solution())
| 366
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype)
        ]
        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            attentions.append(attn_block)
            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)
        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
| 279
| 0
|
from __future__ import annotations
class BoyerMooreSearch:
    """simple docstring"""

    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
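# With text="ABAABA" and pattern="AB" (as below), the bad-character heuristic
# reports matches at positions [0, 3], the two occurrences of "AB".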
text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
| 118
|
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
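# e.g. factorial(5) == 120; @lru_cache memoizes each recursive result, so
# repeated calls return in O(1) after the first computation.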
if __name__ == "__main__":
import doctest
doctest.testmod()
| 118
| 1
|
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")
    state_dict = chkpt["model"]
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v
    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}
    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")
    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 76
|
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(self, value_function, unet, scheduler, env) -> None:
        '''simple docstring'''
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
    def normalize(self, x_in, key):
        '''simple docstring'''
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        '''simple docstring'''
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        '''simple docstring'''
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        '''simple docstring'''
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        '''simple docstring'''
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]
                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]
            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y
    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        '''simple docstring'''
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)
        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)
        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)
        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")
        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
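# Each denoising step in run_diffusion nudges the sampled trajectory along the
# gradient of the value function (classifier-style guidance) before re-clamping
# the current observation with reset_x0, so planning stays anchored to the
# real environment state.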
| 76
| 1
|
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    """simple docstring"""

    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")
        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)
        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))
    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 107
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class OpenAIGPTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 107
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
a_ : List[str] = logging.get_logger(__name__)
a_ : List[str] = {
"Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: 'batch'}
                common_inputs["decoder_attention_mask"] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs["decoder_input_ids"] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs["decoder_attention_mask"] = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction='inputs')
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ] )
        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework)
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework)
        decoder_inputs = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            batch, encoder_seq_length = common_inputs['input_ids'].shape
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs['decoder_attention_mask'] = torch.cat(
                [common_inputs['decoder_attention_mask'], torch.ones(batch, decoder_past_length)], dim=1)
            common_inputs['past_key_values'] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            batch, seqlen = common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs['attention_mask'].dtype
            common_inputs['attention_mask'] = torch.cat(
                [common_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
            common_inputs['past_key_values'] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [' '.join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t)
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 104
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use CLIPImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 104
| 1
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self) -> None:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModel(config)
    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type='np')
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type='np', return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_pndm(self) -> None:
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self) -> None:
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self) -> None:
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self) -> None:
        # clean up VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion(self) -> None:
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safe_stable_diffusion(self) -> None:
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        # without safety guidance
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with strong safety guidance
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safetychecker_safe_stable_diffusion(self) -> None:
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        # without safety guidance the safety checker blacks out the image
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        # with strong safety guidance
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 299
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
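# Usage sketch (hedged, not part of the library file): with the defaults above, a
# config round-trips through its dict form via the generic PretrainedConfig API.
#   config = GLPNConfig()
#   assert GLPNConfig.from_dict(config.to_dict()).hidden_sizes == [32, 64, 160, 256]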
| 186
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
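# Usage sketch (hedged): the attribute_map above lets the generic names resolve to the
# GPT-2-style ones, e.g.
#   config = DecisionTransformerConfig()
#   assert config.num_attention_heads == config.n_head == 1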
| 364
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 196
| 0
|
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
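# Usage sketch (hedged): the ONNX config exposes the dynamic axes of the three
# standard encoder inputs.
#   onnx_config = AlbertOnnxConfig(AlbertConfig(), task="default")
#   assert list(onnx_config.inputs) == ["input_ids", "attention_mask", "token_type_ids"]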
| 1
|
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_missing(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
| 193
| 0
|
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        """Inserts a list of words into the Trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Inserts a word into the Trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Tries to find a word in the Trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Deletes a word from the Trie."""

        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
| 365
|
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of n via trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of a number's distinct prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check that all elements in a list are equal."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first group of n consecutive integers with n distinct prime factors each."""
    base = 2
    while True:
        # Generate a range of n consecutive values starting at base
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function
        # and append our target count to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first qualifying group."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
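# Worked example: the first pair of consecutive integers with two distinct prime
# factors each is (14, 15), since 14 = 2 * 7 and 15 = 3 * 5, so
#   assert solution(2) == 14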
| 328
| 0
|
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 22
|
"""simple docstring"""
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> bool:
'''simple docstring'''
lowercase : Optional[int] = len(_UpperCAmelCase ) + 1
lowercase : Any = len(_UpperCAmelCase ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
lowercase : Tuple = [[0 for i in range(_UpperCAmelCase )] for j in range(_UpperCAmelCase )]
# since string of zero length match pattern of zero length
lowercase : List[Any] = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , _UpperCAmelCase ):
lowercase : Tuple = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , _UpperCAmelCase ):
lowercase : Tuple = dp[0][j - 2] if pattern[j - 1] == '*' else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , _UpperCAmelCase ):
for j in range(1 , _UpperCAmelCase ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
lowercase : List[str] = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
lowercase : Any = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
lowercase : List[Any] = dp[i - 1][j]
else:
lowercase : Optional[int] = 0
else:
lowercase : Dict = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
_UpperCamelCase: int = 'aab'
_UpperCamelCase: Tuple = 'c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
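# A few more examples of the matcher's semantics ('.' is any single character,
# 'x*' is zero or more of x):
#   assert match_pattern("aa", "a*")
#   assert match_pattern("ab", ".*")
#   assert not match_pattern("mississippi", "mis*is*p*.")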
| 255
| 0
|
from __future__ import annotations


def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: int) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: int,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: int,
) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    # APR compounds daily, so convert the annual rate and horizon to days.
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
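# Worked examples (rates are decimal fractions, not percentages):
#   simple_interest(10_000, 0.06, 3)    -> 1800.0      (10000 * 0.06 * 3)
#   compound_interest(10_000, 0.05, 3)  -> 1576.25     (10000 * (1.05**3 - 1))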
| 354
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 96
| 0
|
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], image_processor_second[key])

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], image_processor_second[key])

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]
    return images


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
| 287
|
def lucas_lehmer_test(p: int) -> bool:
    """Run the Lucas-Lehmer primality test on the Mersenne number 2**p - 1."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
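# Worked trace for p = 5, m = 2**5 - 1 = 31:
#   s: 4 -> (16 - 2) % 31 = 14 -> (196 - 2) % 31 = 8 -> (64 - 2) % 31 = 0
# s reaches 0 after p - 2 = 3 iterations, so 31 is prime and the test returns True.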
| 287
| 1
|
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_try_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_try_incompatible_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_try_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_try_incompatible_extension_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            _ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])
def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_with_features():
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
def test_key_datatype(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True,
    ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_duplicate_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True,
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1}, key=10)
            writer.write({"col_1": "bar", "col_2": 2}, key=10)
            num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_write_with_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True,
    ) as writer:
        writer.write({"col_1": "foo", "col_2": 1}, key=1)
        writer.write({"col_1": "bar", "col_2": 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_batch(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
        writer.write_batch({"col_1": [], "col_2": []})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_table(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_row(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)
def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value


@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype
@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype

    # values that are out of range for the optimized dtype fall back to int64
    if col != "other":
        # copy the sequence to avoid errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
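

# Quick illustration of the dtype optimization exercised above (a sketch, not part
# of the test suite): known token columns are downcast to the smallest safe dtype,
# while unknown column names keep the default int64.
def _demo_optimized_typed_sequence():
    assert pa.array(OptimizedTypedSequence([1, 0, 1], col="attention_mask")).type == pa.int8()
    assert pa.array(OptimizedTypedSequence([1, 0, 1], col="other")).type == pa.int64()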
@pytest.mark.parametrize("raise_exception", [False, True])
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    path = str(tmp_path / "dataset-train.arrow")
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed


def test_arrow_writer_with_filesystem(mockfs):
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)


def test_parquet_writer_write():
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}


@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None


def test_always_nullable():
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])

    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)
    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
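

# A minimal end-to-end sketch of the writer API exercised above (illustrative, not
# part of the test suite): write two examples to an in-memory Arrow stream and read
# them back with pyarrow's IPC reader.
def _demo_arrow_writer_roundtrip():
    sink = pa.BufferOutputStream()
    with ArrowWriter(stream=sink) as demo_writer:
        demo_writer.write({"col_1": "foo", "col_2": 1})
        demo_writer.write({"col_1": "bar", "col_2": 2})
        demo_writer.finalize()
    table = pa.ipc.open_stream(pa.BufferReader(sink.getvalue())).read_all()
    assert table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}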
| 356
|
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            # dummy objects record the backends they stand in for
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 262
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes below were obtained with Tesseract 4.1.1
lowerCAmelCase : Optional[Any] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
lowerCAmelCase : List[Any] = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 
3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCamelCase_ )
self.assertListEqual(encoding.boxes , UpperCamelCase_ )
        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
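

# Usage sketch outside the test suite (LayoutLMv3ImageProcessor is the real
# transformers class; the file path is illustrative). With apply_ocr=True the
# processor returns pixel values plus per-image OCR words and normalized boxes.
def _demo_layoutlmv3_processor(path="document.png"):
    processor = LayoutLMv3ImageProcessor(apply_ocr=True)
    encoding = processor(Image.open(path).convert("RGB"), return_tensors="pt")
    return encoding.pixel_values.shape, encoding.words[0], encoding.boxes[0]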
| 60
|
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
    """Fast polynomial multiplication using a radix-2 fast Fourier transform."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as coefficient lists
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root of unity used for the Fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete Fourier transform of A or B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root

            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # Multiply the DFTs of A and B, then invert to obtain A*B
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverse_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner case
        if len(inverse_c[0]) <= 1:
            return inverse_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (inverse_c[i][j] + inverse_c[i][j + self.c_max_length // next_ncol]) / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverse_c[i][j] - inverse_c[i][j + self.c_max_length // next_ncol]) / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverse_c = new_inverse_c
            next_ncol *= 2

        # Unpack
        inverse_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverse_c]
        # Remove trailing (highest-degree) zero coefficients
        while inverse_c[-1] == 0:
            inverse_c.pop()
        return inverse_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
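
    # Small usage sketch (illustrative values): multiply (1 + 2x + 3x^2) by (4 + 5x);
    # the coefficients of A*B print as rounded complex numbers, i.e. 4 + 13x + 22x^2 + 15x^3.
    print(FFT(poly_a=[1, 2, 3], poly_b=[4, 5]))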
| 60
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
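

# Minimal usage sketch outside the test suite (model id and prompts are illustrative):
# CycleDiffusion edits a source image by swapping the source prompt for a new prompt.
def _demo_cycle_diffusion(init_image):
    scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
    pipe = CycleDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler)
    return pipe(
        prompt="A blue colored car",
        source_prompt="A black colored car",
        image=init_image.resize((512, 512)),
        num_inference_steps=50,
        eta=0.1,
        strength=0.85,
        guidance_scale=3,
        source_guidance_scale=1,
    ).images[0]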
| 370
|
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are
        # decoded. Must be tested with an actual model -- the dummy models' tokenizers are not
        # aligned with their models, and `skip_special_tokens=True` has no effect on them.
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
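

# Usage sketch outside the tests (a hedged example, not transformers' own code):
# TextIteratorStreamer yields decoded text chunks while generation runs in a
# background thread, which is the typical way to stream tokens to a UI.
def _demo_streaming(model, tokenizer, input_ids):
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
    Thread(target=model.generate, kwargs={"input_ids": input_ids, "max_new_tokens": 20, "streamer": streamer}).start()
    for new_text in streamer:
        print(new_text, end="", flush=True)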
| 4
| 0
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact; the API URL only returns a redirect to the real file."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)"""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files"""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors


def reduce_by_error(logs, error_filter=None):
    """count each error"""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test method path"""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        model = test.split("/")[2]
    else:
        model = None

    return model
def reduce_by_model(logs, error_filter=None):
    """count each error per model"""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
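
    # Example invocation (the script name, run id and paths are illustrative):
    #   python get_ci_error_statistics.py --workflow_run_id 1234567890 \
    #       --output_dir ci_errors --token "$GITHUB_TOKEN"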
| 175
|
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    """Return a decimal number as a (numerator, denominator) pair in lowest terms."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Reduce by the greatest common divisor (Euclidean algorithm)
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator // divisor, denominator // divisor
        return numerator, denominator
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction('67') = }''')
print(F'''{decimal_to_fraction('45.0') = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction('6.25') = }''')
    try:
        print(f"{decimal_to_fraction('78td') = }")
    except ValueError as err:
        print(err)  # Please enter a valid number
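
    # Cross-check against the standard library (an illustrative sanity check):
    from fractions import Fraction

    assert decimal_to_fraction("6.25") == (Fraction("6.25").numerator, Fraction("6.25").denominator)  # (25, 4)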
| 159
| 0
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"""sep""": self.sep,
"""header""": self.header,
"""names""": self.names,
"""index_col""": self.index_col,
"""usecols""": self.usecols,
"""prefix""": self.prefix,
"""mangle_dupe_cols""": self.mangle_dupe_cols,
"""engine""": self.engine,
"""converters""": self.converters,
"""true_values""": self.true_values,
"""false_values""": self.false_values,
"""skipinitialspace""": self.skipinitialspace,
"""skiprows""": self.skiprows,
"""nrows""": self.nrows,
"""na_values""": self.na_values,
"""keep_default_na""": self.keep_default_na,
"""na_filter""": self.na_filter,
"""verbose""": self.verbose,
"""skip_blank_lines""": self.skip_blank_lines,
"""thousands""": self.thousands,
"""decimal""": self.decimal,
"""lineterminator""": self.lineterminator,
"""quotechar""": self.quotechar,
"""quoting""": self.quoting,
"""escapechar""": self.escapechar,
"""comment""": self.comment,
"""encoding""": self.encoding,
"""dialect""": self.dialect,
"""error_bad_lines""": self.error_bad_lines,
"""warn_bad_lines""": self.warn_bad_lines,
"""skipfooter""": self.skipfooter,
"""doublequote""": self.doublequote,
"""memory_map""": self.memory_map,
"""float_precision""": self.float_precision,
"""chunksize""": self.chunksize,
"""encoding_errors""": self.encoding_errors,
"""on_bad_lines""": self.on_bad_lines,
"""date_format""": self.date_format,
}
        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
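

# Usage sketch: users normally reach this builder through `load_dataset("csv", ...)`;
# the file name and separator below are illustrative.
#
#     from datasets import load_dataset
#     ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=";")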
| 356
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A_ = logging.get_logger(__name__)
A_ = {
'''microsoft/table-transformer-detection''': (
'''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'''
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
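

# Usage sketch (TableTransformerConfig and TableTransformerModel are the real
# transformers classes; the overridden value is illustrative):
#
#     from transformers import TableTransformerConfig, TableTransformerModel
#     config = TableTransformerConfig(num_queries=50)
#     model = TableTransformerModel(config)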
| 296
| 0
|