| code (string, 86-54.5k chars) | code_codestyle (int64, 0-371) | style_context (string, 87-49.2k chars) | style_context_codestyle (int64, 0-349) | label (int64, 0-1) |
|---|---|---|---|---|
"""Project Euler problem 20: find the sum of the digits in 100!."""
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num!."""
    return sum(int(digit) for digit in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
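A quick sanity check, assuming the `solution` function above (the 10! case is small enough to verify by hand):

```python
# 10! = 3628800, and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27
assert solution(10) == 27
```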
| 55
|
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
    @require_torch
    def test_1b_lyrics_tokenizer(self):
        """Tokenize the metadata above with the 1b-lyrics checkpoint and compare against fixed ids."""
import torch
        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
    @require_torch
    def test_5b_lyrics_tokenizer(self):
        """Tokenize the metadata above with the 5b-lyrics checkpoint and compare against fixed ids."""
import torch
        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
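For context, a minimal sketch of how the tokenizer under test is driven outside of unittest; the keyword arguments mirror `tokenizer(**self.metas)` above, and the checkpoint name is the one the first test loads:

```python
from transformers import JukeboxTokenizer

tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
inputs = tokenizer(artist="Zac Brown Band", genres="Country", lyrics="Example lyrics")
print([t.shape for t in inputs["input_ids"]])  # one tensor of token ids per prior level
```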
| 329
| 0
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
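The try/except above is the standard optional-dependency guard used throughout diffusers: when the required torch/transformers versions are missing, importable dummy objects that raise a helpful error replace the real pipelines. A minimal sketch of the same idea, using `importlib` directly (`mypkg` and `HeavyPipeline` are illustrative names, not diffusers API):

```python
import importlib.util

if importlib.util.find_spec("torch") is not None:
    from mypkg.pipeline import HeavyPipeline  # the real, torch-backed implementation
else:
    class HeavyPipeline:
        """Import-safe stub that fails loudly only when actually used."""

        def __init__(self, *args, **kwargs):
            raise ImportError("HeavyPipeline requires `torch`; install it first.")
```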
| 356
|
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start: str, goal: str) -> list:
    """Find a shortest path from start to goal, returned as a list of nodes."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start: str, target: str) -> int:
    """Return the number of edges on a shortest path from start to target (-1 if unreachable)."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(queue)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
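A second worked example, assuming the functions above: from `G` the only neighbour is `C`, and `C` is adjacent to `F`, so the shortest path to `F` has two edges.

```python
assert bfs_shortest_path(demo_graph, "G", "F") == ["G", "C", "F"]
assert bfs_shortest_path_distance(demo_graph, "G", "F") == 2
```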
| 217
| 0
|
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
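The integration test above amounts to the following standalone snippet (standard transformers API; `albert-base-v2` is the checkpoint the test itself loads):

```python
import torch
from transformers import AlbertModel

model = AlbertModel.from_pretrained("albert-base-v2")
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
with torch.no_grad():
    last_hidden_state = model(input_ids).last_hidden_state
print(last_hidden_state.shape)  # torch.Size([1, 11, 768])
```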
| 97
|
import argparse
import json
import os

import torch
from transformers.file_utils import has_file

from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
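Invocation sketch (the flag names come from the argparse definitions above; the script filename is an assumption): `python change_naming_configs_and_checkpoints.py --repo_path ./my-unet-repo --dump_path ./my-unet-repo`.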
| 156
| 0
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
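A usage sketch with the datasets `Features` API (the label names are illustrative):

```python
from datasets import ClassLabel, Features, Value

features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
task = TextClassification(text_column="text", label_column="labels")
aligned = task.align_with_features(features)  # label_schema now carries the concrete ClassLabel
```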
| 353
|
def hamming(n_element: int) -> list:
    """Return the first n_element terms of the Hamming (5-smooth) number series."""
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("n_element should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
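A quick check, assuming the `hamming` function above: the first Hamming numbers are 1, 2, 3, 4, 5, 6, 8, 9, 10, 12 (7 and 11 are skipped because they have prime factors other than 2, 3 and 5).

```python
assert hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]
```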
| 328
| 0
|
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint
    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]
        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint


def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only supports the v1 Stable Diffusion VAE layout.
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
UpperCAmelCase_ : int = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
UpperCAmelCase_ : Any = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
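Invocation sketch (the flags come from the argparse definitions above; the script filename is an assumption): `python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.ckpt --dump_path ./vae-diffusers`.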
| 91
|
"""simple docstring"""
def _A (__a = 50 ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
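The problem statement's worked example makes a convenient check, assuming the `solution` above: a row of five units admits 7 length-2, 3 length-3 and 2 length-4 arrangements.

```python
assert solution(5) == 7 + 3 + 2  # == 12
```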
| 91
| 1
|
"""Print a markdown index of every Python file below the current directory."""
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
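Output sketch, assuming the functions above and a hypothetical tree containing `maths/prime_check.py`:

```python
# print_directory_md(".") would print roughly:
#
# ## Maths
#   * [Prime Check](maths/prime_check.py)
```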
| 61
|
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field

import torch

from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging


logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None


if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()


@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="", metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"}
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
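A minimal sketch of constructing these arguments (standard transformers dataclass usage; the output directory is illustrative):

```python
from transformers.sagemaker import SageMakerTrainingArguments

args = SageMakerTrainingArguments(output_dir="./output")  # emits the deprecation warning above
print(args.world_size, args.place_model_on_device)
```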
| 61
| 1
|
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field

import torch

from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging


logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None


if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()


@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="", metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"}
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 75
|
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # keep TensorFlow's C++ logging quiet if TF gets imported
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 53
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" ELECTRA tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
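Usage sketch (the checkpoint name is taken from the pretrained map above; weights download on first use):

```python
tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
encoding = tokenizer("hello world")
print(encoding["input_ids"])  # [CLS] hello world [SEP], as wordpiece ids
```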
| 350
|
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline( DiffusionPipeline ):
    """simple docstring"""
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__( self , unet , scheduler ) -> None:
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , image: Union[torch.Tensor, PIL.Image.Image] , mask_image: Union[torch.Tensor, PIL.Image.Image] , num_inference_steps: int = 250 , eta: float = 0.0 , jump_length: int = 10 , jump_n_sample: int = 10 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image )
        original_image = original_image.to(device=self.device , dtype=self.unet.dtype )
        mask_image = _preprocess_mask(mask_image )
        mask_image = mask_image.to(device=self.device , dtype=self.unet.dtype )
        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        image_shape = original_image.shape
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps , jump_length , jump_n_sample , self.device )
        self.scheduler.eta = eta
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator , list ) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image , t ).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output , t , image , original_image , mask_image , generator ).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image , t_last , generator )
            t_last = t
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
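# Minimal usage sketch for the pipeline above; the checkpoint name and file
# paths are illustrative assumptions. Per _preprocess_mask, mask values >= 0.5
# mark pixels to keep and values < 0.5 mark the region to inpaint:
#   from PIL import Image
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
#   original = Image.open("face.png").convert("RGB")
#   mask = Image.open("mask.png").convert("L")
#   result = pipe(image=original, mask_image=mask, num_inference_steps=250)
#   result.images[0].save("inpainted.png")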
| 207
| 0
|
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class CpmTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                '''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
                '''See https://pypi.org/project/jieba/ for installation.''' )
        self.jieba = jieba
        self.translator = str.maketrans(''' \n''' , '''\u2582\u2583''' )
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size( self ) -> int:
        return len(self.sp_model )

    def get_vocab( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ) -> Any:
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__( self , d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ) -> str:
        if self.remove_space:
            outputs = ''' '''.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
        if not self.keep_accents:
            outputs = unicodedata.normalize('''NFKD''' , outputs )
            outputs = ''''''.join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text: str ) -> List[str]:
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , '''''' ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token ) -> int:
        return self.sp_model.PieceToId(token )

    def _convert_id_to_token( self , index ) -> str:
        return self.sp_model.IdToPiece(index )

    def convert_tokens_to_string( self , tokens ) -> str:
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
        return ([0] * len(token_ids_0 )) + [1, 1]

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)

    def _decode( self , *args , **kwargs ) -> str:
        text = super()._decode(*args , **kwargs )
        text = text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' )
        return text
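# Minimal round-trip sketch; the checkpoint name comes from the vocab map above,
# the sample sentence is an illustrative assumption:
#   tok = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#   ids = tok.encode("机器学习很有趣")   # SentencePiece pipeline defined above
#   print(tok.decode(ids))               # _decode maps \u2582/\u2583 back to space/newline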
| 23
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ) -> None:
        '''simple docstring'''
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None )-> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False )-> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None )-> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None )-> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        return (out_vocab_file,)
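# Minimal pair-encoding sketch; the checkpoint name is taken from the vocab map
# above, the sample strings are illustrative:
#   tok = RemBertTokenizerFast.from_pretrained("google/rembert")
#   enc = tok("first segment", "second segment")
#   # token_type_ids: 0s over [CLS] A [SEP], 1s over B [SEP], per the methods above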
| 217
| 0
|
"""simple docstring"""
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)


def save_fsdp_model( fsdp_plugin , accelerator , model , output_dir , model_index=0 ):
    os.makedirs(output_dir , exist_ok=True )
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
            output_model_file = os.path.join(output_dir , weights_name )
            if accelerator.process_index == 0:
                logger.info(f'Saving model to {output_model_file}' )
                torch.save(state_dict , output_model_file )
                logger.info(f'Model saved to {output_model_file}' )
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
                if model_index == 0
                else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
            )
            output_model_file = os.path.join(output_dir , weights_name )
            logger.info(f'Saving model to {output_model_file}' )
            torch.save(state_dict , output_model_file )
            logger.info(f'Model saved to {output_model_file}' )
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir , f'{MODEL_NAME}_{model_index}' )
            os.makedirs(ckpt_dir , exist_ok=True )
            logger.info(f'Saving model to {ckpt_dir}' )
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict , storage_writer=dist_cp.FileSystemWriter(ckpt_dir ) , planner=DefaultSavePlanner() , )
            logger.info(f'Model saved to {ckpt_dir}' )
def load_fsdp_model( fsdp_plugin , accelerator , model , input_dir , model_index=0 ):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model ) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        'Set the `sync_module_states` flag to `True` so that model states are synced across processes when '
                        'initializing FSDP object' )
                return
            weights_name = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
            input_model_file = os.path.join(input_dir , weights_name )
            logger.info(f'Loading model from {input_model_file}' )
            state_dict = torch.load(input_model_file )
            logger.info(f'Model loaded from {input_model_file}' )
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
                if model_index == 0
                else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
            )
            input_model_file = os.path.join(input_dir , weights_name )
            logger.info(f'Loading model from {input_model_file}' )
            state_dict = torch.load(input_model_file )
            logger.info(f'Model loaded from {input_model_file}' )
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir , f'{MODEL_NAME}_{model_index}' )
                if f'{MODEL_NAME}' not in input_dir
                else input_dir
            )
            logger.info(f'Loading model from {ckpt_dir}' )
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict , storage_reader=dist_cp.FileSystemReader(ckpt_dir ) , planner=DefaultLoadPlanner() , )
            state_dict = state_dict["model"]
            logger.info(f'Model loaded from {ckpt_dir}' )
        model.load_state_dict(state_dict )
def save_fsdp_optimizer( fsdp_plugin , accelerator , optimizer , model , output_dir , optimizer_index=0 ):
    os.makedirs(output_dir , exist_ok=True )
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        optim_state = FSDP.optim_state_dict(model , optimizer )
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
                )
                output_optimizer_file = os.path.join(output_dir , optim_state_name )
                logger.info(f'Saving Optimizer state to {output_optimizer_file}' )
                torch.save(optim_state , output_optimizer_file )
                logger.info(f'Optimizer state saved in {output_optimizer_file}' )
        else:
            ckpt_dir = os.path.join(output_dir , f'{OPTIMIZER_NAME}_{optimizer_index}' )
            os.makedirs(ckpt_dir , exist_ok=True )
            logger.info(f'Saving Optimizer state to {ckpt_dir}' )
            dist_cp.save_state_dict(
                state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(ckpt_dir ) , planner=DefaultSavePlanner() , )
            logger.info(f'Optimizer state saved in {ckpt_dir}' )
def load_fsdp_optimizer( fsdp_plugin , accelerator , optimizer , model , input_dir , optimizer_index=0 ):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
            )
            input_optimizer_file = os.path.join(input_dir , optimizer_name )
            logger.info(f'Loading Optimizer state from {input_optimizer_file}' )
            optim_state = torch.load(input_optimizer_file )
            logger.info(f'Optimizer state loaded from {input_optimizer_file}' )
        else:
            ckpt_dir = (
                os.path.join(input_dir , f'{OPTIMIZER_NAME}_{optimizer_index}' )
                if f'{OPTIMIZER_NAME}' not in input_dir
                else input_dir
            )
            logger.info(f'Loading Optimizer from {ckpt_dir}' )
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(ckpt_dir ) , )
            optim_state = optim_state['optimizer']
            logger.info(f'Optimizer loaded from {ckpt_dir}' )
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state , model , optimizer )
        optimizer.load_state_dict(flattened_osd )
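# Sketch of the intended call pattern; in practice Accelerator.save_state /
# load_state drive these helpers. The directory name is an illustrative assumption:
#   fsdp_plugin = accelerator.state.fsdp_plugin
#   save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")
#   ...
#   load_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#   load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")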
| 358
|
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
    },
    '''merges_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
    },
    '''tokenizer_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''allenai/led-base-16384''': 16384,
}
class LEDTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) -> None:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'] )
            if "cls" in state:
                state['cls'] = tuple(state['cls'] )
            changes_to_apply = False
            if state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets' , trim_offsets ) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('type' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token( self ) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.' )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def mask_token( self , value ) -> None:
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value

    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                'to use it with pretokenized inputs.' )
        return super()._batch_encode_plus(*args , **kwargs )

    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                'to use it with pretokenized inputs.' )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _pad( self , encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding] , max_length: Optional[int] = None , padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask'] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs['global_attention_mask'] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['global_attention_mask'] = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
        return encoded_inputs
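# Sketch of the -1 padding convention above: 1 marks global attention, 0 marks
# local attention, so padded slots must use -1 rather than a meaningful 0
# (values illustrative):
#   global_attention_mask = [1, 0, 0, 0]        # global attention on the first token
#   padded = global_attention_mask + [-1] * 2   # right-padding by two positions
#   assert padded == [1, 0, 0, 0, -1, -1]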
| 38
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "megatron-bert"

    def __init__( self , vocab_size=29056 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
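# Quick sketch: the defaults above give the standard Megatron-BERT layout, and
# a tiny variant for tests only needs a few overrides (values illustrative):
#   tiny = MegatronBertConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=4)
#   tiny.use_cache  # -> True (default retained)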
| 91
|
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys( name: str ) -> str:
    '''simple docstring'''
    if "emb" in name:
        name = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
    if "transformer" in name:
        name = name.replace('''transformer''' , '''model.decoder''' )
    if "cross_attention" in name:
        name = name.replace('''cross_attention''' , '''encoder_attn''' )
    if "linear1" in name:
        name = name.replace('''linear1''' , '''fc1''' )
    if "linear2" in name:
        name = name.replace('''linear2''' , '''fc2''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''self_attn_layer_norm''' )
    if "norm_cross" in name:
        name = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''final_layer_norm''' )
    if "out_norm" in name:
        name = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
    if "linears" in name:
        name = name.replace('''linears''' , '''lm_heads''' )
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
    return name
def rename_state_dict( state_dict: OrderedDict , hidden_size: int ) -> Tuple[Dict, Dict]:
    '''simple docstring'''
    keys = list(state_dict.keys() )
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key )
        key = rename_keys(key )
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace('''in_proj_weight''' , '''q_proj.weight''' )] = val[:hidden_size, :]
            state_dict[key.replace('''in_proj_weight''' , '''k_proj.weight''' )] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace('''in_proj_weight''' , '''v_proj.weight''' )] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len('''enc_to_dec_proj.''' ) :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint( checkpoint: str ) -> MusicgenDecoderConfig:
    '''simple docstring'''
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint( checkpoint , pytorch_dump_folder=None , repo_id=None , device="cpu" ) -> None:
    '''simple docstring'''
    fairseq_model = MusicGen.get_pretrained(checkpoint , device=device )
    decoder_config = decoder_config_from_checkpoint(checkpoint )
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict , enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict , hidden_size=decoder_config.hidden_size )
    text_encoder = TaEncoderModel.from_pretrained('''t5-base''' )
    audio_encoder = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
    decoder = MusicgenForCausalLM(decoder_config ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys , unexpected_keys = decoder.load_state_dict(decoder_state_dict , strict=False )
    for key in missing_keys.copy():
        if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key )
    if len(missing_keys ) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}" )
    if len(unexpected_keys ) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}" )
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder , audio_encoder=audio_encoder , decoder=decoder )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict )
    # check we can do a forward pass
    input_ids = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    decoder_input_ids = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        logits = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids ).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError('''Incorrect shape for logits''' )
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained('''t5-base''' )
    feature_extractor = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
    processor = MusicgenProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate )
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder ).mkdir(exist_ok=True )
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}" )
        model.save_pretrained(pytorch_dump_folder )
        processor.save_pretrained(pytorch_dump_folder )
    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}" )
        model.push_to_hub(repo_id )
        processor.push_to_hub(repo_id )
if __name__ == "__main__":
lowercase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
lowercase__ : Tuple = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
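# Example invocation (paths illustrative; the script file name is assumed from
# its place among transformers' musicgen conversion utilities):
#   python convert_musicgen_transformers.py --checkpoint small \
#       --pytorch_dump_folder ./musicgen-small --device cpu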
| 328
| 0
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True )
class ImageClassification( TaskTemplate ):
    '''simple docstring'''
    task: str = field(default='image-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'image': Image()} )
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel} )
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features( self , features ) -> "ImageClassification":
        if self.label_column not in features:
            raise ValueError(F'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['labels'] = features[self.label_column]
        # frozen dataclass: write through __dict__ instead of attribute assignment
        task_template.__dict__['label_schema'] = label_schema
        return task_template

    @property
    def column_mapping( self ) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
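# Alignment sketch: copying a dataset's ClassLabel into the label schema
# (feature names and values illustrative):
#   features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#   task = ImageClassification().align_with_features(features)
#   task.label_schema["labels"].names  # -> ["cat", "dog"]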
| 18
|
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card( model_card_dir , src_lang , tgt_lang ) -> None:
    texts = {
        '''en''': '''Machine learning is great, isn\'t it?''',
        '''ru''': '''Машинное обучение - это здорово, не так ли?''',
        '''de''': '''Maschinelles Lernen ist großartig, oder?''',
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        '''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
        '''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
        '''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
        '''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
    }
    pair = F'''{src_lang}-{tgt_lang}'''
    readme = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True )
    path = os.path.join(model_card_dir, '''README.md''' )
    print(F'''Generating {path}''' )
    with open(path, '''w''', encoding='''utf-8''' ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / 'model_cards'

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split('-')
    model_card_dir = model_cards_dir / 'facebook' / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 18
| 1
|
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    '''simple docstring'''
    model_name_or_path: Optional[str] = field(
        default=None ,metadata={
            """help""": (
                """The model checkpoint for weights initialization. Leave None if you want to train a model from"""
                """ scratch."""
            )
        } ,)
    model_type: Optional[str] = field(
        default=None ,metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(MODEL_TYPES )} ,)
    config_name: Optional[str] = field(
        default=None ,metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name: Optional[str] = field(
        default=None ,metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    cache_dir: Optional[str] = field(
        default=None ,metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} ,)
@dataclass
class DataTrainingArguments:
    '''simple docstring'''
    train_data_file: Optional[str] = field(
        default=None ,metadata={"""help""": """The input training data file (a text file)."""} )
    train_data_files: Optional[str] = field(
        default=None ,metadata={
            """help""": (
                """The input training data files (multiple files in glob format). """
                """Very often splitting large files to smaller files can prevent tokenizer going out of memory"""
            )
        } ,)
    eval_data_file: Optional[str] = field(
        default=None ,metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} ,)
    train_ref_file: Optional[str] = field(
        default=None ,metadata={"""help""": """An optional input train ref data file for whole word mask in Chinese."""} ,)
    eval_ref_file: Optional[str] = field(
        default=None ,metadata={"""help""": """An optional input eval ref data file for whole word mask in Chinese."""} ,)
    line_by_line: bool = field(
        default=False ,metadata={"""help""": """Whether distinct lines of text in the dataset are to be handled as distinct sequences."""} ,)
    mlm: bool = field(
        default=False ,metadata={"""help""": """Train with masked-language modeling loss instead of language modeling."""} )
    whole_word_mask: bool = field(default=False ,metadata={"""help""": """Whether or not to use whole word mask."""} )
    mlm_probability: float = field(
        default=0.15 ,metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
    plm_probability: float = field(
        default=1 / 6 ,metadata={
            """help""": (
                """Ratio of length of a span of masked tokens to surrounding context length for permutation language"""
                """ modeling."""
            )
        } ,)
    max_span_length: int = field(
        default=5 ,metadata={"""help""": """Maximum length of a span of masked tokens for permutation language modeling."""} )
    block_size: int = field(
        default=-1 ,metadata={
            """help""": (
                """Optional input sequence length after tokenization."""
                """The training dataset will be truncated in block of this size for training."""
                """Default to the model max input length for single sentence inputs (take into account special tokens)."""
            )
        } ,)
    overwrite_cache: bool = field(
        default=False ,metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def get_dataset( args: DataTrainingArguments, tokenizer: PreTrainedTokenizer, evaluate: bool = False, cache_dir: Optional[str] = None, ):
    def _dataset( file_path, ref_path=None ):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask" )
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path, )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, overwrite_cache=args.overwrite_cache, cache_dir=cache_dir, )
    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file )
    elif args.train_data_files:
        return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file, args.train_ref_file )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument." )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", __lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch." )
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir )
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it, and load it from here, using --tokenizer_name" )
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path ), config=config, cache_dir=model_args.cache_dir, )
    else:
        logger.info("Training new model from scratch" )
        model = AutoModelWithLMHead.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            " --mlm flag (masked language modeling)." )
    if data_args.block_size <= 0:
        # Our input block size will be the max possible for the model
        data_args.block_size = tokenizer.max_len
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len )
    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir ) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir )
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer, plm_probability=data_args.plm_probability, max_span_length=data_args.max_span_length, )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, prediction_loss_only=True, )
# Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
            else None
        )
        trainer.train(model_path=model_path )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["eval_loss"] )
        result = {"perplexity": perplexity}
        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt" )
        if trainer.is_world_master():
            with open(output_eval_file, "w" ) as writer:
                logger.info("***** Eval results *****" )
                for key in sorted(result.keys() ):
                    logger.info(" %s = %s", key, str(result[key] ) )
                    writer.write("%s = %s\n" % (key, str(result[key] )) )
        results.update(result )
    return results
return results
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
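# Example fine-tuning invocation (file names illustrative):
#   python run_language_modeling.py --model_name_or_path bert-base-uncased \
#       --mlm --do_train --train_data_file train.txt --output_dir ./lm-out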
| 61
|
"""simple docstring"""
import os
SYMBOLS = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1_000}


def parse_roman_numerals( numerals ):
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals( num ):
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution( roman_numerals_filename = "/p089_roman.txt" ):
    savings = 0
    with open(os.path.dirname(__file__ ) + roman_numerals_filename ) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original )
        shorter = generate_roman_numerals(num )
        savings += len(original ) - len(shorter )
    return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
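# Round-trip sketch for the two helpers above (values illustrative):
if __name__ == "__main__":
    assert parse_roman_numerals("XIIII") == 14
    assert generate_roman_numerals(14) == "XIV"  # 5 chars -> 3 chars, a saving of 2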
| 61
| 1
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random (shape[0] x shape[1]) nested list of floats in [0, scale)."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
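# Illustration (comment added): floats_list((2, 3)) returns a 2x3 nested list such
# as [[0.64, 0.02, 0.27], [0.22, 0.74, 0.68]]; the tests wrap it in np.asarray as needed.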
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=10, hop_length=160, chunk_length=8, padding_value=0.0, sampling_rate=4000, return_attention_mask=False, do_normalize=True, ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]
        input_features = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        input_features_truncated = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(input_features, input_features_truncated):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
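    # Shape arithmetic (comment added; values are WhisperFeatureExtractor defaults):
    # chunk_length 30 s at sampling_rate 16 kHz with hop_length 160 gives
    # nb_max_frames = 30 * 16000 / 160 = 3000 frames of feature_size = 80 mel bins,
    # hence the (1, 80, 3000) shape asserted above.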
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
| 140
|
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_CITATION = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
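# Minimal standalone sketch (added; mirrors _compute above without the datasets machinery):
def _spearman_demo() -> None:
    rho, p_value = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
    print(round(rho, 1), round(p_value, 2))  # -0.7 0.19, matching the docstring examples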
| 140
| 1
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self) -> None:
        super().setUp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        # Map each character to its id and write the vocab file the tokenizer expects.
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)
                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)
                self.assertEqual(text_2.replace(" ", ""), output_text)
    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
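# Hedged usage note (added): MgpstrTokenizer is character-level, so
# tokenizer("tester")["input_ids"] yields one id per character under the vocab
# written in setUp above.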
| 6
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
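# Hedged usage sketch (added; assumes a constructed image processor and tokenizer):
# processor = BlipProcessor(image_processor, tokenizer)
# batch = processor(images=pil_image, text="a photo of", return_tensors="pt")
# -> one BatchEncoding containing pixel_values plus input_ids/attention_mask.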
| 207
| 0
|
from jiwer import compute_measures
import datasets
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
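# Standalone worked example (added; mirrors _compute above without the datasets wrapper):
def _wer_demo() -> None:
    measures = compute_measures("this is the reference", "this is the prediction")
    wer = (measures["substitutions"] + measures["deletions"] + measures["insertions"]) / (
        measures["substitutions"] + measures["deletions"] + measures["hits"]
    )
    print(wer)  # 0.25: one substituted word ("reference" -> "prediction") out of four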
| 364
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    '''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
        '''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''FalconForCausalLM''',
        '''FalconModel''',
        '''FalconPreTrainedModel''',
        '''FalconForSequenceClassification''',
        '''FalconForTokenClassification''',
        '''FalconForQuestionAnswering''',
    ]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
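# Note (added): the _LazyModule registration above defers heavy imports, so
# `from transformers.models.falcon import FalconModel` only triggers the
# torch-dependent module load when the attribute is actually accessed.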
| 278
| 0
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.framework == "tf":
            raise ValueError(f'The {self.__class__} is only available in PyTorch.')
        requires_backends(self, 'vision')
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items()))
    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs['threshold'] = kwargs['threshold']
        return {}, {}, postprocess_kwargs
    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors='pt')
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs['words'], boxes=inputs['boxes'], return_tensors='pt')
        inputs['target_size'] = target_size
        return inputs
    def _forward(self, model_inputs):
        target_size = model_inputs.pop('target_size')
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({'target_size': target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs['bbox'] = model_inputs['bbox']
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs['target_size']
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                # LayoutLM boxes are normalized to a 1000x1000 grid; scale back to pixels.
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs['logits'].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs['bbox'].squeeze(0)]
            keys = ['score', 'label', 'box']
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation['scores']
            labels = raw_annotation['labels']
            boxes = raw_annotation['boxes']
            raw_annotation['scores'] = scores.tolist()
            raw_annotation['labels'] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation['boxes'] = [self._get_bounding_box(box) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ['score', 'label', 'box']
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation['scores'], raw_annotation['labels'], raw_annotation['boxes'])
            ]
        return annotation
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.')
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
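# Output shape note (added): each prediction is a dict such as
# {"score": 0.998, "label": "cat", "box": {"xmin": 12, "ymin": 30, "xmax": 320, "ymax": 290}},
# and __call__ returns a list of these (or a list of lists for batched inputs).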
| 7
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 38
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name='size', default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
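# Hedged usage sketch (added; names as defined above):
# processor = CLIPImageProcessor()                       # 224px shortest edge, CLIP mean/std
# batch = processor(images=pil_image, return_tensors="np")
# batch["pixel_values"].shape == (1, 3, 224, 224)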
| 251
|
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(self, parent, batch_size=13, patch_size=2, max_length=24, num_mel_bins=16, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, frequency_stride=2, time_stride=2, ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
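        # Worked numbers (comment added): with the defaults above,
        # frequency_out = (16 - 2) // 2 + 1 = 8 and time_out = (24 - 2) // 2 + 1 = 12,
        # so num_patches = 96 and seq_length = 98.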
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, input_values, labels
    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size, max_length=self.max_length, num_mel_bins=self.num_mel_bins, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, frequency_stride=self.frequency_stride, time_stride=self.time_stride, )
    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False
    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='AST does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id='nielsr/audio-spectogram-transformer-checkpoint', filename='sample_audio.flac', repo_type='dataset')
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593')
            if is_torchaudio_available()
            else None
        )
    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593').to(torch_device)
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 251
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class RwkvConfig(PretrainedConfig):
    model_type = 'rwkv'
    attribute_map = {'max_position_embeddings': 'context_length'}

    def __init__(self, vocab_size=5_0277, context_length=1024, hidden_size=4096, num_hidden_layers=32, attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1E-5, bos_token_id=0, eos_token_id=0, rescale_every=6, tie_word_embeddings=False, use_cache=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
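# Hedged usage sketch (added): RwkvConfig() reproduces the defaults above, and the
# attribute_map means config.max_position_embeddings transparently reads context_length:
# config = RwkvConfig(vocab_size=50277, context_length=2048, num_hidden_layers=24)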
| 18
|
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """Resonant frequency of an LC circuit: f = 1 / (2*pi*sqrt(L*C))."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
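# Worked example (added): with L = 10 mH and C = 5 uF,
# f = 1 / (2*pi*sqrt(10e-3 * 5e-6)) ≈ 711.76 Hz.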
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18
| 1
|
snake_case : List[str] = frozenset(
[
'''prompt''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
snake_case : Optional[int] = frozenset(['''prompt''', '''negative_prompt'''])
snake_case : List[str] = frozenset([])
snake_case : Optional[int] = frozenset(['''image'''])
snake_case : Union[str, Any] = frozenset(
[
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
snake_case : List[Any] = frozenset(['''image'''])
snake_case : List[str] = frozenset(
[
'''prompt''',
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
snake_case : Union[str, Any] = frozenset(['''prompt''', '''image''', '''negative_prompt'''])
snake_case : Optional[int] = frozenset(
[
# Text guided image variation with an image mask
'''prompt''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
snake_case : int = frozenset(['''prompt''', '''image''', '''mask_image''', '''negative_prompt'''])
snake_case : int = frozenset(
[
# image variation with an image mask
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
snake_case : List[Any] = frozenset(['''image''', '''mask_image'''])
snake_case : Optional[int] = frozenset(
[
'''example_image''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
snake_case : List[str] = frozenset(['''example_image''', '''image''', '''mask_image'''])
snake_case : List[Any] = frozenset(['''class_labels'''])
snake_case : Optional[Any] = frozenset(['''class_labels'''])
snake_case : Union[str, Any] = frozenset(['''batch_size'''])
snake_case : List[str] = frozenset([])
snake_case : Dict = frozenset(['''batch_size'''])
snake_case : str = frozenset([])
snake_case : Dict = frozenset(
[
'''prompt''',
'''audio_length_in_s''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
snake_case : int = frozenset(['''prompt''', '''negative_prompt'''])
snake_case : Tuple = frozenset(['''input_tokens'''])
snake_case : Tuple = frozenset(['''input_tokens'''])
| 368
|
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl", ):
    """Collect (context, information-gain) pairs used to train the secondary learner."""
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim)
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''')
    # load pretrained model
    model = load_gpt2('''gpt2''').to(device)
    print('''computing perplexity on objective set''')
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print('''perplexity on objective set:''', orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt", ):
    """Train the secondary learner, i.e. the information-gain predictor."""
    set_seed(42)
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained('''gpt2''')
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner, secondary_learner_train_data, max_epochs=secondary_learner_max_epochs, batch_size=secondary_learner_batch_size, eval_freq=100, igf_model_path=igf_model_path, )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=None, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt", ):
    """Fine-tune GPT-2, optionally filtering training batches through the secondary learner."""
    device = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''')
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print('''Test perplexity, step''', global_step, ''':''', real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0))[0].item()
                observed_qs.append(float(predicted_q))
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print('''Test perplexity, step''', global_step, ''':''', real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''')
# Required parameters
parser.add_argument(
'''--data_dir''' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help='''The input data dir. Should contain data files for WikiText.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--data_file''' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help=(
'''A jbl file containing tokenized data which can be split as objective dataset, '''
'''train_dataset and test_dataset.'''
) , )
parser.add_argument(
'''--igf_data_file''' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
parser.add_argument(
'''--output_dir''' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help='''The output directory where the final fine-tuned model is stored.''' , )
parser.add_argument(
'''--tokenizer_name''' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument('''--seed''' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='''A seed for reproducible training.''' )
parser.add_argument(
'''--context_len''' , default=32 , type=UpperCAmelCase_ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--size_objective_set''' , default=100 , type=UpperCAmelCase_ , help='''number of articles that are long enough to be used as our objective set''' , )
parser.add_argument(
'''--eval_freq''' , default=100 , type=UpperCAmelCase_ , help='''secondary model evaluation is triggered at eval_freq''' )
parser.add_argument('''--max_steps''' , default=1000 , type=UpperCAmelCase_ , help='''To calculate training epochs''' )
parser.add_argument(
'''--secondary_learner_batch_size''' , default=128 , type=UpperCAmelCase_ , help='''batch size of training data for secondary learner''' , )
parser.add_argument(
'''--batch_size''' , default=16 , type=UpperCAmelCase_ , help='''batch size of training data of language model(gpt2) ''' )
parser.add_argument(
'''--eval_interval''' , default=10 , type=UpperCAmelCase_ , help=(
'''decay the selectivity of our secondary learner filter from'''
'''1 standard deviation above average to 1 below average after 10 batches'''
) , )
parser.add_argument(
'''--number''' , default=100 , type=UpperCAmelCase_ , help='''The number of examples split to be used as objective_set/test_data''' )
parser.add_argument(
'''--min_len''' , default=1026 , type=UpperCAmelCase_ , help='''The minimum length of the article to be used as objective set''' )
parser.add_argument(
'''--secondary_learner_max_epochs''' , default=15 , type=UpperCAmelCase_ , help='''number of epochs to train secondary learner''' )
parser.add_argument('''--trim''' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help='''truncate the example if it exceeds context length''' )
parser.add_argument(
'''--threshold''' , default=1.0 , type=UpperCAmelCase_ , help=(
'''The threshold value used by secondary learner to filter the train_data and allow only'''
''' informative data as input to the model'''
) , )
parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=UpperCAmelCase_ , help='''finetuned_model_name''' )
parser.add_argument(
'''--recopy_model''' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file='''data/tokenized_stories_train_wikitext103.jbl''', igf_data_file='''igf_context_pairs.jbl''', )
    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load('''data/IGF_values.jbl''')
    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path='''igf_model.pt''', )
    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained('''gpt2''')
    set_seed(42)
    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file='''data/tokenized_stories_train_wikitext103.jbl''', number=100, min_len=1026, trim=True)
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=secondary_learner, eval_interval=10, finetuned_model_name='''gpt2_finetuned.pt''', )
if __name__ == "__main__":
main()
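# --- Illustration (not part of the script above) ------------------------------
# A minimal sketch of the linear selectivity decay described by the
# --eval_interval help text: the secondary-learner filter drifts from +1
# standard deviation above the mean to -1 below it over the first
# `eval_interval` batches. The helper name and the exact linear schedule are
# assumptions for illustration, not taken from the IGF implementation.
def decayed_selectivity(batch_idx: int, eval_interval: int = 10) -> float:
    progress = min(batch_idx / eval_interval, 1.0)  # fraction of the decay window
    return 1.0 - 2.0 * progress  # +1.0 at batch 0, -1.0 at batch `eval_interval`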
| 281
| 0
|
def solution( n : int = 10_00 ):
    '''simple docstring'''
    product = -1
    candidate = 0
    for a in range(1 ,n // 3 ):
        # Solving a**2 + b**2 = c**2 and a + b + c = n, eliminating c:
        # substitute c = n - a - b into the first equation and simplify to get
        # b = (n**2 - 2*a*n) / (2*n - 2*a).
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
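# Worked check (Project Euler 9): for n = 1000 the only Pythagorean triple with
# a + b + c = 1000 is (a, b, c) = (200, 375, 425), so solution() returns
# 200 * 375 * 425 == 31_875_000.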
if __name__ == "__main__":
print(F"""{solution() = }""")
| 140
|
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation( graph : dict ,v : str ,visited_forward : set ,visited_backward : set ,cst_fwd : dict ,cst_bwd : dict ,queue : PriorityQueue ,parent : dict ,shortest_distance : float | int ,):
    '''simple docstring'''
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt ,np.inf )
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt) )
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij( source : str ,destination : str ,graph_forward : dict ,graph_backward : dict ):
    '''simple docstring'''
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward : PriorityQueue[Any] = PriorityQueue()
    queue_backward : PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source) )
    queue_backward.put((0, destination) )
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd )
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd )
        shortest_distance = pass_and_relaxation(
            graph_forward ,v_fwd ,visited_forward ,visited_backward ,cst_fwd ,cst_bwd ,queue_forward ,parent_forward ,shortest_distance ,)
        shortest_distance = pass_and_relaxation(
            graph_backward ,v_bwd ,visited_backward ,visited_forward ,cst_bwd ,cst_fwd ,queue_backward ,parent_backward ,shortest_distance ,)
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"""B""": [["""C""", 1]],
"""C""": [["""D""", 1]],
"""D""": [["""F""", 1]],
"""E""": [["""B""", 1], ["""G""", 2]],
"""F""": [],
"""G""": [["""F""", 1]],
}
graph_bwd = {
"""B""": [["""E""", 1]],
"""C""": [["""B""", 1]],
"""D""": [["""C""", 1]],
"""F""": [["""D""", 1], ["""G""", 1]],
"""E""": [[None, np.inf]],
"""G""": [["""E""", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
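# Example (using the graphs above): bidirectional_dij("E", "F", graph_fwd, graph_bwd)
# returns 3, the cost of the path E -> G -> F (2 + 1); the longer chain
# E -> B -> C -> D -> F would cost 4.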
| 140
| 1
|
"""simple docstring"""
def _A ( separator : str, separated : list[str]) -> str:
    '''simple docstring'''
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
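# Example (illustrative): _A("-", ["a", "b", "c"]) builds "a-b-c-" in the loop,
# and the final strip(separator) removes the trailing "-", returning "a-b-c".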
if __name__ == "__main__":
from doctest import testmod
testmod()
| 350
|
"""simple docstring"""
import argparse
import os
import re
PATH_TO_AUTO_MODULE = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
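# Quick illustration (not part of the script): on a typical mapping entry the
# identifier pattern captures the first quoted string, which is the sort key
# used below.
#     >>> _re_identifier.search('        ("albert", "AlbertConfig"),').group(1)
#     'albert'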
def sort_auto_mapping( fname : str, overwrite : bool = False) -> Optional[Any]:
    '''simple docstring'''
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()
    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda block: _re_identifier.search(block).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1
    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings( overwrite : bool = False) -> Optional[Any]:
    '''simple docstring'''
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]
    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            F"""The following files have auto mappings that need sorting: {", ".join(failures)}. Run `make style` to fix"""
            " this.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 144
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : int = ["""speech"""]
def __init__( self : Dict , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Dict ) -> int:
'''simple docstring'''
requires_backends(self , ['''speech'''] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Union[str, Any] = ["""speech"""]
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''speech'''] )
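# Context note (based on the usual transformers dummy-object pattern, not stated
# here): these placeholders keep `from transformers import ...` working when the
# `speech` extra is not installed; instantiating one calls requires_backends,
# which raises an ImportError pointing at the missing backend.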
| 319
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config( model_name ):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768
    # set label information
    num_labels = 150
    repo_id = '''huggingface/label-files'''
    filename = '''ade20k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = SwinConfig(
        embed_dim=embed_dim , depths=depths , num_heads=num_heads , window_size=window_size , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=id2label , label2id=label2id , )
    return config
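# Example (illustrative, derived from the branches above):
# get_upernet_config("upernet-swin-base") selects embed_dim=128, window_size=12
# and a 512-channel auxiliary head, and attaches the 150-class ADE20K label maps.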
def create_rename_keys( config ):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_q_k_v( state_dict , backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight" )
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias" )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim :, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim :]
            # fmt: on
def correct_unfold_reduction_order( x ):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel , 4 , in_channel // 4 )
    x = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def reverse_correct_unfold_reduction_order( x ):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel , in_channel // 4 , 4 )
    x = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def correct_unfold_norm_order( x ):
    in_channel = x.shape[0]
    x = x.reshape(4 , in_channel // 4 )
    x = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(in_channel )
    return x
def reverse_correct_unfold_norm_order( x ):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4 , 4 )
    x = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(in_channel )
    return x
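# Background (an inference from the helpers above, not from the original repo
# docs): Swin's patch-merging layer concatenates the four pixels of each 2x2
# window before the channel-reducing linear layer, and the original checkpoints
# and the Transformers port concatenate them in different orders. The
# [0, 2, 1, 3] permutation swaps the two middle groups of channels to translate
# between the layouts; the "reverse" variants undo it for the checkpoint layout.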
def convert_upernet_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub ):
    model_name_to_url = {
        '''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''',
        '''upernet-swin-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''',
        '''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''',
        '''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''',
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' , file_name=model_name )[
        '''state_dict'''
    ]
    for name, param in state_dict.items():
        print(name , param.shape )
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace('''bn''' , '''batch_norm''' )
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config.backbone_config )
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value )
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value )
    model.load_state_dict(state_dict )
    # verify on image
    url = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
    processor = SegformerImageProcessor()
    pixel_values = processor(image , return_tensors='''pt''' ).pixel_values
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print(logits.shape )
    print('''First values of logits:''' , logits[0, 0, :3, :3] )
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
    print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1E-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"Saving processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub" )
        model.push_to_hub(f"openmmlab/{model_name}" )
        processor.push_to_hub(f"openmmlab/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-swin-tiny''',
type=str,
choices=[f"upernet-swin-{size}" for size in ['''tiny''', '''small''', '''base''', '''large''']],
help='''Name of the Swin + UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 278
| 0
|
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'''EncodecConfig''': ['''overlap'''],
# used as `self.bert_model = BertModel(config, ...)`
'''DPRConfig''': True,
# not used in modeling files, but it's an important information
'''FSMTConfig''': ['''langs'''],
# used internally in the configuration class file
'''GPTNeoConfig''': ['''attention_types'''],
# used internally in the configuration class file
'''EsmConfig''': ['''is_folding_model'''],
# used during training (despite we don't have training script for these models yet)
'''Mask2FormerConfig''': ['''ignore_value'''],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'''OneFormerConfig''': ['''ignore_value''', '''norm'''],
# used during preprocessing and collation, see `collating_graphormer.py`
'''GraphormerConfig''': ['''spatial_pos_max'''],
# used internally in the configuration class file
'''T5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
'''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
# used internally in the configuration class file
'''LongT5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
'''SwitchTransformersConfig''': ['''feed_forward_proj'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''BioGptConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''GLPNConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''SegformerConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''CvtConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''PerceiverConfig''': ['''layer_norm_eps'''],
# used internally to calculate the feature size
'''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate `mlp_dim`
'''SamVisionConfig''': ['''mlp_ratio'''],
# For (head) training, but so far not implemented
'''ClapAudioConfig''': ['''num_classes'''],
# Not used, but providing useful information to users
'''SpeechT5HifiGanConfig''': ['''sampling_rate'''],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def check_attribute_being_used(config_class , attributes , default_value , source_strings ):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                F'config.{attribute}' in modeling_source
                or F'getattr(config, "{attribute}"' in modeling_source
                or F'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    RF'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"' , modeling_source , )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break
    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        'bos_index',
        'eos_index',
        'pad_index',
        'unk_index',
        'mask_index',
        'image_size',
        'use_cache',
        'out_features',
        'out_indices',
    ]
    attributes_used_in_generation = ['encoder_no_repeat_ngram_size']
    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith('_token_id' ):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
                case_allowed = allowed_cases is True or attribute in allowed_cases
    return attribute_used or case_allowed
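# Illustration (not part of the script): the multi-line regex above matches a
# wrapped call such as
#     getattr(
#         self.config,
#         "hidden_size",
#     )
# because the [ \t\v\n\r\f]* character classes also consume the newlines
# between the tokens.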
def check_config_attributes_being_used(config_class ):
    signature = dict(inspect.signature(config_class.__init__ ).parameters )
    parameter_names = [x for x in list(signature.keys() ) if x not in ['self', 'kwargs']]
    parameter_defaults = [signature[param].default for param in parameter_names]
    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map ) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class )
    model_dir = os.path.dirname(config_source_file )
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir , fn ) for fn in os.listdir(model_dir ) if fn.startswith('modeling_' )]
    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path ):
            with open(path ) as fp:
                modeling_sources.append(fp.read() )
    unused_attributes = []
    for config_param, default_value in zip(parameter_names , parameter_defaults ):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param] )
        if not check_attribute_being_used(config_class , attributes , default_value , modeling_sources ):
            unused_attributes.append(attributes[0] )
    return sorted(unused_attributes )
def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class ) , lambda x: inspect.isclass(x )
                and issubclass(x , PretrainedConfig )
                and inspect.getmodule(x ) == inspect.getmodule(_config_class ) , )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class )
            if len(unused_attributes ) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes
    if len(configs_with_unused_attributes ) > 0:
        error = 'The following configuration classes contain unused attributes in the corresponding modeling files:\n'
        for name, attributes in configs_with_unused_attributes.items():
            error += F'{name}: {attributes}\n'
        raise ValueError(error )
if __name__ == "__main__":
check_config_attributes()
| 309
|
"""simple docstring"""
def find_minimum_change(denominations : list[int] , value : str ):
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append the "answers" array
    return answer
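# Worked example (illustrative): find_minimum_change([1, 2, 5, 10, 20, 50, 100,
# 500, 2000], "649") returns [500, 100, 20, 20, 5, 2, 2]. The greedy choice is
# optimal for canonical coin systems like this 1-2-5 series, though not for
# arbitrary denominations.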
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = '''0'''
    if (
        input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
        == "y"
    ):
        n = int(input('''Enter the number of denominations you want to add: ''').strip())
        for i in range(0, n):
            denominations.append(int(input(f'''Denomination {i}: ''').strip()))
        value = input('''Enter the change you want to make in Indian Currency: ''').strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input('''Enter the change you want to make: ''').strip()
    if int(value) == 0 or int(value) < 0:
        print('''The total value cannot be zero or negative.''')
    else:
        print(f'''Following is minimal change for {value}: ''')
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=''' ''')
| 309
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
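# Context note: _LazyModule defers the heavy imports declared in _import_structure
# until an attribute is first accessed, so importing the package stays cheap; the
# TYPE_CHECKING branch above gives static type checkers the real symbols instead.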
| 251
|
'''simple docstring'''
from itertools import product
def total_frequency_distribution(sides_number: int ,dice_number: int ):
    """simple docstring"""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number ,max_face_number + 1 )
    for dice_numbers in product(face_numbers ,repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies
def solution():
    """simple docstring"""
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 ,dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 ,dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total ,max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability ,ndigits=7 )
    return rounded_peter_win_probability
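# For the parameters above -- Peter with nine 4-sided dice against Colin with six
# 6-sided dice -- this evaluates to 0.5731441 (Project Euler 205).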
if __name__ == "__main__":
print(F"""{solution() = }""")
| 251
| 1
|
def find_minimum_change( denominations , value ):
    """simple docstring"""
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append the "answers" array
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = '''0'''
    if (
        input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
        == "y"
    ):
        n = int(input('''Enter the number of denominations you want to add: ''').strip())
        for i in range(0, n):
            denominations.append(int(input(f'''Denomination {i}: ''').strip()))
        value = input('''Enter the change you want to make in Indian Currency: ''').strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input('''Enter the change you want to make: ''').strip()
    if int(value) == 0 or int(value) < 0:
        print('''The total value cannot be zero or negative.''')
    else:
        print(f'''Following is minimal change for {value}: ''')
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=''' ''')
| 363
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class snake_case__(unittest.TestCase ):
"""simple docstring"""
    @slow
    def test_small_integration_test( self ):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small" )
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small" )
        input_ids = tokenizer("Hello there" , return_tensors="np" ).input_ids
        labels = tokenizer("Hi I am" , return_tensors="np" ).input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
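# Note on the score above (an inference from the variable name, not stated in the
# test): multiplying the mean per-token cross-entropy by the target length
# recovers the summed negative log-likelihood, matching how the original
# Mesh-TensorFlow ("mtf") T5 code reports scores.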
| 121
| 0
|
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]
def create_inputs(input_types ):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append('Text input' )
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((512, 512) ) )
        elif input_type == "audio":
            inputs.append(torch.ones(3000 ) )
        elif isinstance(input_type , list ):
            inputs.append(create_inputs(input_type ) )
        else:
            raise ValueError(F"Invalid type requested: {input_type}" )
    return inputs
def output_types(outputs ):
    output_types = []
    for output in outputs:
        if isinstance(output , (str, AgentText) ):
            output_types.append('text' )
        elif isinstance(output , (Image.Image, AgentImage) ):
            output_types.append('image' )
        elif isinstance(output , (torch.Tensor, AgentAudio) ):
            output_types.append('audio' )
        else:
            raise ValueError(F"Invalid output: {output}" )
    return output_types
@is_tool_test
class A__ :
    def test_inputs_outputs( self ):
        self.assertTrue(hasattr(self.tool , 'inputs' ) )
        self.assertTrue(hasattr(self.tool , 'outputs' ) )
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input , list ):
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types )
    def test_call( self ):
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs ) , self.tool.outputs )
    def test_common_attributes( self ):
        self.assertTrue(hasattr(self.tool , 'description' ) )
        self.assertTrue(hasattr(self.tool , 'default_checkpoint' ) )
        self.assertTrue(self.tool.description.startswith('This is a tool that' ) )
    def test_agent_types_outputs( self ):
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
        for output, output_type in zip(outputs , self.tool.outputs ):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output , agent_type ) )
    def test_agent_type_inputs( self ):
        inputs = create_inputs(self.tool.inputs )
        _inputs = []
        for _input, input_type in zip(inputs , self.tool.inputs ):
            if isinstance(input_type , list ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        outputs = self.tool(*_inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
| 86
|
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data( product : str = "laptop" ) -> DataFrame:
    '''simple docstring'''
    url = F'''https://www.amazon.in/laptop/s?k={product}'''
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url , headers=header ).text )
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ] )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span" , attrs={"class": "a-offscreen"} ).text
            try:
                product_rating = item.find("span" , attrs={"class": "a-icon-alt"} ).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹" ).replace("," , "" ) )
                            - float(product_price.strip("₹" ).replace("," , "" ) )
                        )
                        / float(product_mrp.strip("₹" ).replace("," , "" ) )
                    )
                    * 100 )
            except ValueError:
                discount = float("nan" )
        except AttributeError:
            pass
        # (reconstructed) new rows are written at label 0; the index shift below
        # moves earlier rows up so nothing is overwritten
        data_frame.loc[0] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        product_price = " "
        product_mrp = " "
        data_frame.index += 1
    return data_frame
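# Worked example of the discount formula above (illustrative numbers): an MRP of
# ₹1,000 and a price of ₹750 give (1000 - 750) / 1000 * 100 = 25.0 percent off.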
if __name__ == "__main__":
snake_case : Any = "headphones"
get_amazon_product_data(product).to_csv(F"Amazon Product Data for {product}.csv")
| 281
| 0
|
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : Tuple = logging.get_logger(__name__)
def get_maskformer_config( model_name: str ):
    """simple docstring"""
    backbone_config = SwinConfig.from_pretrained(
        'microsoft/swin-tiny-patch4-window7-224' , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
    config = MaskFormerConfig(backbone_config=backbone_config )
    repo_id = 'huggingface/label-files'
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = 'maskformer-ade20k-full-id2label.json'
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = 'ade20k-id2label.json'
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = 'maskformer-coco-stuff-id2label.json'
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = 'coco-panoptic-id2label.json'
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = 'cityscapes-id2label.json'
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = 'mapillary-vistas-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    return config
def create_rename_keys( config ):
    """simple docstring"""
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.layers.{i}.downsample.reduction.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.layers.{i}.downsample.norm.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.layers.{i}.downsample.norm.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias") )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F"sem_seg_head.adapter_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight") )
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight") )
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias") )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias") )
# cross-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias") )
# MLP 1
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", F"model.transformer_module.decoder.layers.{idx}.fc1.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", F"model.transformer_module.decoder.layers.{idx}.fc1.bias") )
# MLP 2
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", F"model.transformer_module.decoder.layers.{idx}.fc2.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", F"model.transformer_module.decoder.layers.{idx}.fc2.bias") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias") )
# layernorm 3 (final layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias") )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.weight", F"mask_embedder.{i}.0.weight") )
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.bias", F"mask_embedder.{i}.0.bias") )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v( state_dict , backbone_config ):
    """simple docstring"""
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.weight" )
            in_proj_bias = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.bias" )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim :, :]
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v( state_dict , config ):
    """simple docstring"""
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[: hidden_size, :]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size :, :]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[: hidden_size, :]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size :, :]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size :]
def prepare_img() -> torch.Tensor:
    """simple docstring"""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def _UpperCamelCase ( UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : bool = False ) -> int:
"""simple docstring"""
lowerCAmelCase__ = get_maskformer_config(UpperCamelCase_ )
# load original state_dict
with open(UpperCamelCase_ , 'rb' ) as f:
lowerCAmelCase__ = pickle.load(UpperCamelCase_ )
lowerCAmelCase__ = data['model']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
lowerCAmelCase__ = create_rename_keys(UpperCamelCase_ )
for src, dest in rename_keys:
rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
read_in_swin_q_k_v(UpperCamelCase_ , config.backbone_config )
read_in_decoder_q_k_v(UpperCamelCase_ , UpperCamelCase_ )
# update to torch tensors
for key, value in state_dict.items():
lowerCAmelCase__ = torch.from_numpy(UpperCamelCase_ )
# load 🤗 model
lowerCAmelCase__ = MaskFormerForInstanceSegmentation(UpperCamelCase_ )
model.eval()
for name, param in model.named_parameters():
print(UpperCamelCase_ , param.shape )
lowerCAmelCase__ , lowerCAmelCase__ = model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(UpperCamelCase_ ) == 0, F"Unexpected keys: {unexpected_keys}"
# verify results
lowerCAmelCase__ = prepare_img()
if "vistas" in model_name:
lowerCAmelCase__ = 65
elif "cityscapes" in model_name:
lowerCAmelCase__ = 6_5535
else:
lowerCAmelCase__ = 255
lowerCAmelCase__ = True if 'ade' in model_name else False
lowerCAmelCase__ = MaskFormerImageProcessor(ignore_index=UpperCamelCase_ , reduce_labels=UpperCamelCase_ )
lowerCAmelCase__ = image_processor(UpperCamelCase_ , return_tensors='pt' )
lowerCAmelCase__ = model(**UpperCamelCase_ )
print('Logits:' , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
expected_logits = torch.tensor(
[[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_logits , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"Saving model and image processor to {pytorch_dump_folder_path}" )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
model.save_pretrained(pytorch_dump_folder_path )
image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print('Pushing model and image processor to the hub...' )
model.push_to_hub(F"nielsr/{model_name}" )
image_processor.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
__snake_case : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help="""Name of the MaskFormer model you'd like to convert""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__snake_case : Union[str, Any] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
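# Example invocation of the script above (the file name is illustrative; the
# checkpoint path depends on where the original pickle checkpoint lives):
#
#     python convert_maskformer_checkpoint.py \
#         --model_name maskformer-swin-tiny-ade \
#         --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#         --pytorch_dump_folder_path ./maskformer-swin-tiny-ade \
#         --push_to_hub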
| 122
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.embeddings_size = embeddings_size
self.hidden_sizes = hidden_sizes
self.depths = depths
self.is_training = is_training
self.use_labels = use_labels
self.hidden_act = hidden_act
self.num_labels = num_labels
self.scope = scope
self.num_stages = len(hidden_sizes )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.num_labels )
config = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCamelCase__ ( self , config , pixel_values , labels ):
"""simple docstring"""
model = TFResNetModel(config=config )
result = model(pixel_values )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCamelCase__ ( self , config , pixel_values , labels ):
"""simple docstring"""
config.num_labels = self.num_labels
model = TFResNetForImageClassification(config )
result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class TFResNetModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
pipeline_model_mapping = (
{'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification}
if is_tf_available()
else {}
)
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
test_onnx = False
has_attentions = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.model_tester = TFResNetModelTester(self )
self.config_tester = ConfigTester(self , config_class=ResNetConfig , has_text_modality=False )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def create_and_test_config_common_properties( self ):
"""simple docstring"""
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ['pixel_values']
self.assertListEqual(arg_names[:1] , expected_arg_names )
def UpperCamelCase__ ( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def UpperCamelCase__ ( self ):
"""simple docstring"""
def check_hidden_states_output(inputs_dict , config , model_class ):
model = model_class(config )
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_stages = self.model_tester.num_stages
self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
layers_type = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
config.layer_type = layer_type
inputs_dict['output_hidden_states'] = True
check_hidden_states_output(inputs_dict , config , model_class )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict , config , model_class )
def UpperCamelCase__ ( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFResNetModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def prepare_img( ) -> List[str]:
"""simple docstring"""
image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
@cached_property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image , return_tensors='tf' )
# forward pass
outputs = model(**inputs )
# verify the logits
expected_shape = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1e-4 ) )
| 122
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase: int = logging.get_logger(__name__)
_lowercase: List[str] = {
'google/pix2struct-textcaps-base': (
'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
),
}
class Pix2StructTextConfig( PretrainedConfig ):
"""simple docstring"""
model_type = "pix2struct_text_model"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__(self , vocab_size=50244 , hidden_size=768 , d_kv=64 , d_ff=2048 , num_layers=12 , num_heads=12 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , initializer_factor=1.0 , dense_act_fn="gelu_new" , decoder_start_token_id=0 , use_cache=False , pad_token_id=0 , eos_token_id=1 , tie_word_embeddings=False , is_decoder=True , **kwargs , ):
"""simple docstring"""
a = vocab_size
a = hidden_size
a = d_kv
a = d_ff
a = num_layers
a = num_heads
a = relative_attention_num_buckets
a = relative_attention_max_distance
a = dropout_rate
a = layer_norm_epsilon
a = initializer_factor
a = use_cache
a = eos_token_id
a = decoder_start_token_id
# for backwards compatibility
a = dense_act_fn
super().__init__(
pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , tie_word_embeddings=tie_word_embeddings , is_decoder=is_decoder , **kwargs , )
@classmethod
def UpperCamelCase_ (cls , pretrained_model_name_or_path , **kwargs ):
"""simple docstring"""
cls._set_token_in_kwargs(kwargs )
config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
config_dict = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(config_dict , **kwargs )
class Pix2StructVisionConfig( PretrainedConfig ):
"""simple docstring"""
model_type = "pix2struct_vision_model"
def __init__(self , hidden_size=768 , patch_embed_hidden_size=768 , d_ff=2048 , d_kv=64 , num_hidden_layers=12 , num_attention_heads=12 , dense_act_fn="gelu_new" , layer_norm_eps=1E-6 , dropout_rate=0.0 , attention_dropout=0.0 , initializer_range=1E-10 , initializer_factor=1.0 , seq_len=4096 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , **kwargs , ):
"""simple docstring"""
super().__init__(**kwargs )
a = hidden_size
a = patch_embed_hidden_size
a = d_ff
a = dropout_rate
a = num_hidden_layers
a = num_attention_heads
a = initializer_range
a = initializer_factor
a = attention_dropout
a = layer_norm_eps
a = dense_act_fn
a = seq_len
a = relative_attention_num_buckets
a = relative_attention_max_distance
a = d_kv
@classmethod
def UpperCamelCase_ (cls , pretrained_model_name_or_path , **kwargs ):
"""simple docstring"""
cls._set_token_in_kwargs(kwargs )
config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
config_dict = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(config_dict , **kwargs )
class Pix2StructConfig( PretrainedConfig ):
"""simple docstring"""
model_type = "pix2struct"
is_composition = True
def __init__(self , text_config=None , vision_config=None , initializer_factor=1.0 , initializer_range=0.02 , is_vqa=False , tie_word_embeddings=False , is_encoder_decoder=True , **kwargs , ):
"""simple docstring"""
super().__init__(tie_word_embeddings=tie_word_embeddings , is_encoder_decoder=is_encoder_decoder , **kwargs )
if text_config is None:
text_config = {}
logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
if vision_config is None:
vision_config = {}
logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
self.text_config = Pix2StructTextConfig(**text_config )
self.vision_config = Pix2StructVisionConfig(**vision_config )
self.decoder_start_token_id = self.text_config.decoder_start_token_id
self.pad_token_id = self.text_config.pad_token_id
self.eos_token_id = self.text_config.eos_token_id
self.initializer_factor = initializer_factor
self.initializer_range = initializer_range
self.text_config.initializer_range = self.initializer_range
self.vision_config.initializer_range = self.initializer_range
self.is_vqa = is_vqa
@classmethod
def UpperCamelCase_ (cls , text_config , vision_config , **kwargs ):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
def UpperCamelCase_ (self ):
"""simple docstring"""
output = copy.deepcopy(self.__dict__ )
output["text_config"] = self.text_config.to_dict()
output["vision_config"] = self.vision_config.to_dict()
output["model_type"] = self.__class__.model_type
return output
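# Illustrative composition of the three configs above (a sketch using the upstream
# class names restored in this file; values are arbitrary small settings):
#
#     text_config = Pix2StructTextConfig(num_layers=2, num_heads=2)
#     vision_config = Pix2StructVisionConfig(num_hidden_layers=2, num_attention_heads=2)
#     config = Pix2StructConfig(
#         text_config=text_config.to_dict(), vision_config=vision_config.to_dict()
#     )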
| 227
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput( BaseOutput ):
latents: torch.FloatTensor
class VQModel( ModelMixin , ConfigMixin ):
@register_to_config
def __init__( self , in_channels: int = 3 , out_channels: int = 3 , down_block_types: Tuple[str] = ("DownEncoderBlock2D",) , up_block_types: Tuple[str] = ("UpDecoderBlock2D",) , block_out_channels: Tuple[int] = (64,) , layers_per_block: int = 1 , act_fn: str = "silu" , latent_channels: int = 3 , sample_size: int = 32 , num_vq_embeddings: int = 256 , norm_num_groups: int = 32 , vq_embed_dim: Optional[int] = None , scaling_factor: float = 0.18215 , norm_type: str = "group" , ):
super().__init__()
# pass init params to Encoder
self.encoder = Encoder(
in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )
vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
self.quant_conv = nn.Conv2d(latent_channels , vq_embed_dim , 1 )
self.quantize = VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.25 , remap=None , sane_index_shape=False )
self.post_quant_conv = nn.Conv2d(vq_embed_dim , latent_channels , 1 )
# pass init params to Decoder
self.decoder = Decoder(
in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )
@apply_forward_hook
def encode( self , x: torch.FloatTensor , return_dict: bool = True ):
h = self.encoder(x )
h = self.quant_conv(h )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=h )
@apply_forward_hook
def decode( self , h: torch.FloatTensor , force_not_quantize: bool = False , return_dict: bool = True ):
# also go through quantization layer
if not force_not_quantize:
quant , emb_loss , info = self.quantize(h )
else:
quant = h
quant2 = self.post_quant_conv(quant )
dec = self.decoder(quant2 , quant if self.config.norm_type == "spatial" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=dec )
def forward( self , sample: torch.FloatTensor , return_dict: bool = True ):
x = sample
h = self.encode(x ).latents
dec = self.decode(h ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=dec )
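# Round-trip sketch of the data flow above: encode() compresses an image to latents
# through the Encoder and the 1x1 quant conv; decode() snaps those latents to their
# nearest codebook entries in the VectorQuantizer before the post-quant conv and the
# Decoder reconstruct the image. Assuming the class is diffusers' VQModel with the
# defaults from __init__:
#
#     import torch
#     from diffusers import VQModel
#
#     model = VQModel()
#     image = torch.randn(1, 3, 32, 32)
#     latents = model.encode(image).latents
#     reconstruction = model.decode(latents).sample  # same shape as the input image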
| 144
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class DistilBertConfig( PretrainedConfig ):
"""simple docstring"""
model_type = '''distilbert'''
attribute_map = {
'''hidden_size''': '''dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
}
def __init__( self , vocab_size=30522 , max_position_embeddings=512 , sinusoidal_pos_embds=False , n_layers=6 , n_heads=12 , dim=768 , hidden_dim=4 * 768 , dropout=0.1 , attention_dropout=0.1 , activation="gelu" , initializer_range=0.02 , qa_dropout=0.1 , seq_classif_dropout=0.2 , pad_token_id=0 , **kwargs , ):
'''simple docstring'''
_A = vocab_size
_A = max_position_embeddings
_A = sinusoidal_pos_embds
_A = n_layers
_A = n_heads
_A = dim
_A = hidden_dim
_A = dropout
_A = attention_dropout
_A = activation
_A = initializer_range
_A = qa_dropout
_A = seq_classif_dropout
super().__init__(**kwargs , pad_token_id=pad_token_id )
class DistilBertOnnxConfig( OnnxConfig ):
"""simple docstring"""
@property
def lowerCAmelCase ( self : str ):
'''simple docstring'''
if self.task == "multiple-choice":
dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
else:
dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
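# The OnnxConfig above declares the dynamic axes that the transformers.onnx exporter
# feeds to torch.onnx.export; a typical export of this model (output folder is
# illustrative):
#
#     python -m transformers.onnx --model=distilbert-base-uncased onnx/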
| 174
|
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def check_results_dict_not_empty( self , results ):
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
_A = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(__UpperCAmelCase )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = "sgugger/tiny-distilbert-classification"
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , only_pretrain_model=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
config = AutoConfig.from_pretrained(MODEL_ID )
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase , [config] )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
config = AutoConfig.from_pretrained(MODEL_ID )
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase , [config] )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
config = AutoConfig.from_pretrained(MODEL_ID )
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase , [config] )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
_A = "patrickvonplaten/t5-tiny-random"
config = AutoConfig.from_pretrained(MODEL_ID )
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase , configs=[config] )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU" ) ) == 0 , "Cannot do xla on CPU." )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__UpperCAmelCase , save_to_csv=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__UpperCAmelCase , "inf_time.csv" ) , inference_memory_csv_file=os.path.join(__UpperCAmelCase , "inf_mem.csv" ) , env_info_csv_file=os.path.join(__UpperCAmelCase , "env.csv" ) , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "env.csv" ) ).exists() )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(__UpperCAmelCase : Any ):
self.assertTrue(hasattr(__UpperCAmelCase , "sequential" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "cumulative" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "current" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__UpperCAmelCase , "log.txt" ) , log_print=__UpperCAmelCase , trace_memory_line_by_line=__UpperCAmelCase , eager_mode=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "log.txt" ) ).exists() )
| 174
| 1
|
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer( model ):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module ):
def __init__( self , module , rank ):
super().__init__()
self.module = module
self.adapter = nn.Sequential(
nn.Linear(module.in_features , rank , bias=False ) , nn.Linear(rank , module.out_features , bias=False ) , )
small_std = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=small_std )
nn.init.zeros_(self.adapter[1].weight )
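# the up-projection starts at zero, so the adapter is an exact no-op at init
# and wrapping a frozen module does not change the model until training updates it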
self.adapter.to(module.weight.device )
def forward( self , x , *args , **kwargs ):
return self.module(x , *args , **kwargs ) + self.adapter(x )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
model_name = """bigscience/bloom-1b7"""
# Constant values
EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574
input_text = """Hello my name is"""
EXPECTED_OUTPUTS = set()
EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""" )
EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" )
EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" )
MAX_NEW_TOKENS = 10
def setUp( self ):
# Models and tokenizer
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name )
class Bnb4BitTest(Base4bitTest ):
def setUp( self ):
super().setUp()
# Models and tokenizer
self.model_fpaa = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.float16 , device_map="""auto""" )
self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_4bit=True , device_map="""auto""" )
def tearDown( self ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ):
config = self.model_abit.config
self.assertTrue(hasattr(config , """quantization_config""" ) )
_ = config.to_dict()
_ = config.to_diff_dict()
_ = config.to_json_string()
def __UpperCamelCase ( self ):
from bitsandbytes.nn import Params4bit
mem_fpaa = self.model_fpaa.get_memory_footprint()
mem_abit = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
linear = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Params4bit )
def __UpperCamelCase ( self ):
from transformers import T5PreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(module , torch.nn.Linear ):
if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uint8 )
def __UpperCamelCase ( self ):
encoded_input = self.tokenizer(self.input_text , return_tensors="""pt""" )
output_sequences = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=True ) , self.EXPECTED_OUTPUTS )
def __UpperCamelCase ( self ):
config = BitsAndBytesConfig()
config.load_in_4bit = True
model_abit_from_config = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=config , device_map="""auto""" )
encoded_input = self.tokenizer(self.input_text , return_tensors="""pt""" )
output_sequences = model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=True ) , self.EXPECTED_OUTPUTS )
def __UpperCamelCase ( self ):
with self.assertRaises(NotImplementedError ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(tmpdirname )
def __UpperCamelCase ( self ):
config = BitsAndBytesConfig()
with self.assertRaises(ValueError ):
_ = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=config , load_in_4bit=True , device_map="""auto""" , bnb_4bit_quant_type="""nf4""" , )
def __UpperCamelCase ( self ):
with self.assertRaises(ValueError ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(ValueError ):
# Tries with a `dtype`
self.model_abit.to(torch.float16 )
with self.assertRaises(ValueError ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(ValueError ):
# Tries casting to float
self.model_abit.float()
with self.assertRaises(ValueError ):
# Tries casting to half
self.model_abit.half()
# Test if we did not break anything
encoded_input = self.tokenizer(self.input_text , return_tensors="""pt""" )
self.model_fpaa = self.model_fpaa.to(torch.float32 )
_ = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
_lowerCAmelCase : Dict = self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
_lowerCAmelCase : Tuple = self.model_fpaa.half()
# Check this does not throw an error
_lowerCAmelCase : Optional[int] = self.model_fpaa.float()
def __UpperCamelCase ( self ):
model = AutoModelForSeq2SeqLM.from_pretrained("""t5-small""" , load_in_4bit=True , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32 )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase ):
@classmethod
def setUpClass( cls ):
cls.model_name = """t5-small"""
cls.dense_act_model_name = """google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense
cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name )
cls.input_text = """Translate in German: Hello, my dog is cute"""
def tearDown( self ):
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ):
from transformers import T5ForConditionalGeneration
modules = T5ForConditionalGeneration._keep_in_fp32_modules
T5ForConditionalGeneration._keep_in_fp32_modules = None
# test with `t5-small`
model = T5ForConditionalGeneration.from_pretrained(self.model_name , load_in_4bit=True , device_map="""auto""" )
encoded_input = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
_ = model.generate(**encoded_input )
# test with `flan-t5-small`
model = T5ForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_4bit=True , device_map="""auto""" )
encoded_input = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
_ = model.generate(**encoded_input )
T5ForConditionalGeneration._keep_in_fp32_modules = modules
def __UpperCamelCase ( self ):
import bitsandbytes as bnb
from transformers import T5ForConditionalGeneration
# test with `t5-small`
model = T5ForConditionalGeneration.from_pretrained(self.model_name , load_in_4bit=True , device_map="""auto""" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linear4bit ) )
encoded_input = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
_ = model.generate(**encoded_input )
# test with `flan-t5-small`
model = T5ForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_4bit=True , device_map="""auto""" )
encoded_input = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
_ = model.generate(**encoded_input )
class Classes4BitModelTest(Base4bitTest ):
def setUp( self ):
super().setUp()
# model_name
self.model_name = """bigscience/bloom-560m"""
self.seq_to_seq_name = """t5-small"""
# Different types of model
self.base_model = AutoModel.from_pretrained(self.model_name , load_in_4bit=True , device_map="""auto""" )
# Sequence classification model
self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_4bit=True , device_map="""auto""" )
# CausalLM model
self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_4bit=True , device_map="""auto""" )
# Seq2seq model
self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
self.seq_to_seq_name , load_in_4bit=True , device_map="""auto""" )
def __UpperCamelCase ( self ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ):
from bitsandbytes.nn import Params4bit
self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class Pipeline4BitTest(Base4bitTest ):
def setUp( self ):
super().setUp()
def tearDown( self ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ):
self.pipe = pipeline(
"""text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.float16} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
_lowerCAmelCase : Tuple = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest ):
def setUp( self ):
super().setUp()
def __UpperCamelCase ( self ):
model_parallel = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_4bit=True , device_map="""balanced""" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
encoded_input = self.tokenizer(self.input_text , return_tensors="""pt""" )
# Second real batch
output_parallel = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=True ) , self.EXPECTED_OUTPUTS )
class Bnb4BitTestTraining(Base4bitTest ):
def setUp( self ):
self.model_name = """facebook/opt-350m"""
super().setUp()
def __UpperCamelCase ( self ):
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
model = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_4bit=True )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
param.requires_grad = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
param.data = param.data.to(torch.float32 )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(module ) ):
module.q_proj = LoRALayer(module.q_proj , rank=16 )
module.k_proj = LoRALayer(module.k_proj , rank=16 )
module.v_proj = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
batch = self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
out = model.forward(**batch )
out.logits.norm().backward()
for module in model.modules():
if isinstance(module , LoRALayer ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(module , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class Bnb4BitGPT2Test(Bnb4BitTest ):
__lowerCAmelCase : Any = """gpt2-xl"""
__lowerCAmelCase : Optional[Any] = 3.3191854854152187
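# The core API the tests above exercise, in isolation (a sketch; same model id as the
# test constants, requires a CUDA GPU with bitsandbytes installed):
#
#     from transformers import AutoModelForCausalLM, AutoTokenizer
#
#     model = AutoModelForCausalLM.from_pretrained(
#         "bigscience/bloom-1b7", load_in_4bit=True, device_map="auto"
#     )
#     tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7")
#     inputs = tokenizer("Hello my name is", return_tensors="pt").to(0)
#     out = model.generate(**inputs, max_new_tokens=10)
#     print(tokenizer.decode(out[0], skip_special_tokens=True))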
| 309
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor ):
def __init__( self , *args , **kwargs ):
warnings.warn(
"""The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DonutImageProcessor instead.""" , FutureWarning , )
super().__init__(*args , **kwargs )
| 309
| 1
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model , ckpt_dir , model_name ) -> None:
'''simple docstring'''
tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
var_map = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(ckpt_dir ):
os.makedirs(ckpt_dir )
state_dict = model.state_dict()
def to_tf_var_name(name ):
for patt, repl in iter(var_map ):
name = name.replace(patt , repl )
return F"""bert/{name}"""
def create_tf_var(tensor , name , session ):
tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(tf_var )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
tf_name = to_tf_var_name(var_name )
torch_tensor = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
torch_tensor = torch_tensor.T
tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session )
tf.keras.backend.set_value(tf_var , torch_tensor )
tf_tensor = session.run(tf_var )
print(F"""Successfully created {tf_name}: {np.allclose(tf_tensor , torch_tensor )}""" )
saver = tf.train.Saver(tf.trainable_variables() )
saver.save(session , os.path.join(ckpt_dir , model_name.replace("-" , "_" ) + ".ckpt" ) )
def main(raw_args=None ):
'''simple docstring'''
parser = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=str , required=True , help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" , type=str , default=None , required=False , help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" , type=str , required=True , help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" , type=str , required=True , help="Directory in which to save tensorflow model" )
args = parser.parse_args(raw_args )
_snake_case : Tuple = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=__lowercase , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
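# Example invocation (the script name follows the transformers repo's
# convert_bert_pytorch_checkpoint_to_original_tf.py; paths are illustrative):
#
#     python convert_bert_pytorch_checkpoint_to_original_tf.py \
#         --model_name bert-base-uncased \
#         --pytorch_model_path ./bert/pytorch_model.bin \
#         --tf_cache_dir ./bert-tf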
| 284
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_mra'] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
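# For context, _LazyModule defers the heavy torch imports until an attribute is first
# touched. A minimal sketch of the idea (simplified, not the real implementation):
#
#     import importlib
#
#     class LazyModule:
#         def __init__(self, name, import_structure):
#             self._name = name
#             self._attr_to_module = {
#                 attr: mod for mod, attrs in import_structure.items() for attr in attrs
#             }
#
#         def __getattr__(self, attr):
#             submodule = importlib.import_module("." + self._attr_to_module[attr], self._name)
#             return getattr(submodule, attr)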
| 284
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_instructblip'] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 173
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Dict = logging.get_logger(__name__)
UpperCAmelCase__ : Union[str, Any] = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class Swin2SRConfig( PretrainedConfig ):
'''simple docstring'''
model_type = '''swin2sr'''
attribute_map = {
'''hidden_size''': '''embed_dim''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , image_size=64 , patch_size=1 , num_channels=3 , embed_dim=180 , depths=[6, 6, 6, 6, 6, 6] , num_heads=[6, 6, 6, 6, 6, 6] , window_size=8 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , upscale=2 , img_range=1.0 , resi_connection="1conv" , upsampler="pixelshuffle" , **kwargs , ):
"""simple docstring"""
super().__init__(**kwargs )
_A: List[str] = image_size
_A: Any = patch_size
_A: Any = num_channels
_A: Union[str, Any] = embed_dim
_A: int = depths
_A: List[Any] = len(depths )
_A: int = num_heads
_A: Any = window_size
_A: Optional[int] = mlp_ratio
_A: int = qkv_bias
_A: List[Any] = hidden_dropout_prob
_A: List[str] = attention_probs_dropout_prob
_A: List[Any] = drop_path_rate
_A: Any = hidden_act
_A: List[str] = use_absolute_embeddings
_A: Tuple = layer_norm_eps
_A: str = initializer_range
_A: int = upscale
_A: int = img_range
_A: Optional[Any] = resi_connection
_A: int = upsampler
| 121
| 0
|
class Graph:
'''simple docstring'''
def __init__( self ):
self.num_vertices = 0
self.num_edges = 0
self.adjacency = {}
def add_vertex( self , vertex ):
if vertex not in self.adjacency:
self.adjacency[vertex] = {}
self.num_vertices += 1
def add_edge( self , head , tail , weight ):
self.add_vertex(head )
self.add_vertex(tail )
if head == tail:
return
self.adjacency[head][tail] = weight
self.adjacency[tail][head] = weight
def distinct_weight( self ):
edges = self.get_edges()
for edge in edges:
head , tail , weight = edge
edges.remove((tail, head, weight) )
for i in range(len(edges ) ):
edges[i] = list(edges[i] )
edges.sort(key=lambda e : e[2] )
for i in range(len(edges ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
edges[i + 1][2] = edges[i][2] + 1
for edge in edges:
head , tail , weight = edge
self.adjacency[head][tail] = weight
self.adjacency[tail][head] = weight
def __str__( self ):
string = ''
for tail in self.adjacency:
for head in self.adjacency[tail]:
weight = self.adjacency[head][tail]
string += F"{head} -> {tail} == {weight}\n"
return string.rstrip('\n' )
def get_edges( self ):
output = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def get_vertices( self ):
return self.adjacency.keys()
@staticmethod
def build( vertices=None , edges=None ):
g = Graph()
if vertices is None:
vertices = []
if edges is None:
edges = []
for vertex in vertices:
g.add_vertex(vertex )
for edge in edges:
g.add_edge(*edge )
return g
class UnionFind:
'''simple docstring'''
def __init__( self ):
self.parent = {}
self.rank = {}
def __len__( self ):
return len(self.parent )
def make_set( self , item ):
if item in self.parent:
return self.find(item )
self.parent[item] = item
self.rank[item] = 0
return item
def find( self , item ):
if item not in self.parent:
return self.make_set(item )
if item != self.parent[item]:
self.parent[item] = self.find(self.parent[item] )
return self.parent[item]
def union( self , item1 , item2 ):
root1 = self.find(item1 )
root2 = self.find(item2 )
if root1 == root2:
return root1
if self.rank[root1] > self.rank[root2]:
self.parent[root2] = root1
return root1
if self.rank[root1] < self.rank[root2]:
self.parent[root1] = root2
return root2
if self.rank[root1] == self.rank[root2]:
self.rank[root1] += 1
self.parent[root2] = root1
return root1
return None
@staticmethod
def boruvka_mst( graph ):
# Boruvka's algorithm: repeatedly add the cheapest edge leaving each
# component, merging components via union-find until one remains.
num_components = graph.num_vertices
union_find = Graph.UnionFind()
mst_edges = []
while num_components > 1:
cheap_edge = {}
for vertex in graph.get_vertices():
cheap_edge[vertex] = -1
edges = graph.get_edges()
for edge in edges:
head , tail , weight = edge
edges.remove((tail, head, weight) )
for edge in edges:
head , tail , weight = edge
set1 = union_find.find(head )
set2 = union_find.find(tail )
if set1 != set2:
if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
cheap_edge[set1] = [head, tail, weight]
if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
cheap_edge[set2] = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
head , tail , weight = cheap_edge[vertex]
if union_find.find(head ) != union_find.find(tail ):
union_find.union(head , tail )
mst_edges.append(cheap_edge[vertex] )
num_components = num_components - 1
mst = Graph.build(edges=mst_edges )
return mst
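# Quick sanity check of the MST routine above (weights are kept distinct, as
# distinct_weight() expects):
#
#     g = Graph.build(
#         vertices=[0, 1, 2, 3],
#         edges=[(0, 1, 1), (1, 2, 2), (2, 3, 3), (3, 0, 4)],
#     )
#     print(Graph.boruvka_mst(g))  # spanning tree built from the three cheapest edges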
| 210
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem( AbstractFileSystem ):
'''simple docstring'''
root_marker = ""
protocol = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self , repo_info : Optional[DatasetInfo] = None , token : Optional[str] = None , **kwargs , ):
super().__init__(self , **kwargs )
self.repo_info = repo_info
self.token = token
self.dir_cache = None
def _get_dirs( self ):
if self.dir_cache is None:
self.dir_cache = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
self.dir_cache[hf_file.rfilename] = {
'name': hf_file.rfilename,
'size': None,
'type': 'file',
}
self.dir_cache.update(
{
str(d ): {'name': str(d ), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def _open( self , path : str , mode : str = "rb" , **kwargs , ):
if not isinstance(self.repo_info , DatasetInfo ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
url = hf_hub_url(self.repo_info.id , path , revision=self.repo_info.sha )
return fsspec.open(
url , mode=mode , headers=get_authentication_headers_for_url(url , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open()
def info( self , path , **kwargs ):
self._get_dirs()
path = self._strip_protocol(path )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(path )
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Any=False , **_lowerCAmelCase : str ):
self._get_dirs()
SCREAMING_SNAKE_CASE_ = PurePosixPath(path.strip('/' ) )
SCREAMING_SNAKE_CASE_ = {}
for p, f in self.dir_cache.items():
SCREAMING_SNAKE_CASE_ = PurePosixPath(p.strip('/' ) )
SCREAMING_SNAKE_CASE_ = p.parent
if root == path:
SCREAMING_SNAKE_CASE_ = f
SCREAMING_SNAKE_CASE_ = list(paths.values() )
if detail:
return out
else:
return sorted(f['name'] for f in out )
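# Usage sketch (ours, not part of the library): browse a public dataset repo
# like any fsspec filesystem. Requires network access; "squad" is an example id.
if __name__ == "__main__":
    from huggingface_hub.hf_api import HfApi

    fs = HfFileSystem(repo_info=HfApi().dataset_info("squad"))
    print(fs.ls(""))  # top-level files and directories from the cached tree
    with fs.open("README.md", mode="rb") as f:  # streamed via hf_hub_url
        print(f.read(100))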
| 210
| 1
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
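# Illustration (ours): with the checker installed above, a doctest marked with
# the new flag passes no matter what the call actually prints or returns.
def _ignore_result_demo():
    """
    >>> _ignore_result_demo()  # doctest: +IGNORE_RESULT
    'any output is accepted here'
    """
    return 42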
| 122
|
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to store the converted VAE.")
    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
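# Follow-up sketch (ours; paths are placeholders): the dumped folder reloads as
# a regular diffusers model.
#
#   from diffusers import AutoencoderKL
#   vae = AutoencoderKL.from_pretrained("./vae-diffusers")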
| 122
| 1
|
'''simple docstring'''
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
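# Sketch (ours): the "@@" suffix is the BPE continuation marker, so undoing the
# segmentation is a join-and-strip, e.g. "T@@ ô@@ i" -> "Tôi".
def _undo_bpe(tokens):
    return " ".join(tokens).replace("@@ ", "")


assert _undo_bpe(["T@@", "ô@@", "i", "l@@", "à"]) == "Tôi là"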
| 360
|
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
UpperCamelCase__ = logging.get_logger(__name__)
enable_full_determinism()
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = UNetaDModel
lowerCAmelCase__ = 'sample'
@property
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = 4
UpperCAmelCase__ : str = 3
UpperCAmelCase__ : str = (32, 32)
UpperCAmelCase__ : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : Tuple = torch.tensor([10] ).to(_A )
return {"sample": noise, "timestep": time_step}
@property
def lowercase_ ( self : int ):
'''simple docstring'''
return (3, 32, 32)
@property
def lowercase_ ( self : Dict ):
'''simple docstring'''
return (3, 32, 32)
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = {
'''block_out_channels''': (32, 64),
'''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
'''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
'''attention_head_dim''': 3,
'''out_channels''': 3,
'''in_channels''': 3,
'''layers_per_block''': 2,
'''sample_size''': 32,
}
UpperCAmelCase__ : Tuple = self.dummy_input
return init_dict, inputs_dict
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = UNetaDModel
lowerCAmelCase__ = 'sample'
@property
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = 4
UpperCAmelCase__ : Dict = 4
UpperCAmelCase__ : List[str] = (32, 32)
UpperCAmelCase__ : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : List[Any] = torch.tensor([10] ).to(_A )
return {"sample": noise, "timestep": time_step}
@property
def lowercase_ ( self : Tuple ):
'''simple docstring'''
return (4, 32, 32)
@property
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return (4, 32, 32)
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = {
'''sample_size''': 32,
'''in_channels''': 4,
'''out_channels''': 4,
'''layers_per_block''': 2,
'''block_out_channels''': (32, 64),
'''attention_head_dim''': 32,
'''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
'''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
}
UpperCAmelCase__ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : int = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_A )
UpperCAmelCase__ : Dict = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Any = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
model.to(_A )
UpperCAmelCase__ : Dict = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
model_accelerate.to(_A )
model_accelerate.eval()
UpperCAmelCase__ : Tuple = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCAmelCase__ : Union[str, Any] = noise.to(_A )
UpperCAmelCase__ : Optional[Any] = torch.tensor([10] * noise.shape[0] ).to(_A )
UpperCAmelCase__ : Any = model_accelerate(_A , _A )['''sample''']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
UpperCAmelCase__ , UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained(
'''fusing/unet-ldm-dummy-update''' , output_loading_info=_A , low_cpu_mem_usage=_A )
model_normal_load.to(_A )
model_normal_load.eval()
UpperCAmelCase__ : Optional[int] = model_normal_load(_A , _A )['''sample''']
assert torch_all_close(_A , _A , rtol=1e-3 )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
model.eval()
model.to(_A )
UpperCAmelCase__ : Union[str, Any] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCAmelCase__ : str = noise.to(_A )
UpperCAmelCase__ : str = torch.tensor([10] * noise.shape[0] ).to(_A )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(_A , _A ).sample
UpperCAmelCase__ : List[Any] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
UpperCAmelCase__ : Tuple = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1e-3 ) )
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = UNetaDModel
lowerCAmelCase__ = 'sample'
@property
def lowercase_ ( self : Any , _A : str=(32, 32) ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = 4
UpperCAmelCase__ : List[str] = 3
UpperCAmelCase__ : str = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : Dict = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=_A )
return {"sample": noise, "timestep": time_step}
@property
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return (3, 32, 32)
@property
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
return (3, 32, 32)
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = {
'''block_out_channels''': [32, 64, 64, 64],
'''in_channels''': 3,
'''layers_per_block''': 1,
'''out_channels''': 3,
'''time_embedding_type''': '''fourier''',
'''norm_eps''': 1e-6,
'''mid_block_scale_factor''': math.sqrt(2.0 ),
'''norm_num_groups''': None,
'''down_block_types''': [
'''SkipDownBlock2D''',
'''AttnSkipDownBlock2D''',
'''SkipDownBlock2D''',
'''SkipDownBlock2D''',
],
'''up_block_types''': [
'''SkipUpBlock2D''',
'''SkipUpBlock2D''',
'''AttnSkipUpBlock2D''',
'''SkipUpBlock2D''',
],
}
UpperCAmelCase__ : Tuple = self.dummy_input
return init_dict, inputs_dict
@slow
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : str = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_A )
UpperCAmelCase__ : List[str] = self.dummy_input
UpperCAmelCase__ : Dict = floats_tensor((4, 3) + (256, 256) ).to(_A )
UpperCAmelCase__ : Optional[Any] = noise
UpperCAmelCase__ : Any = model(**_A )
assert image is not None, "Make sure output is not None"
@slow
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : int = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
model.to(_A )
UpperCAmelCase__ : Optional[Any] = 4
UpperCAmelCase__ : List[str] = 3
UpperCAmelCase__ : Dict = (256, 256)
UpperCAmelCase__ : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : Union[str, Any] = torch.tensor(batch_size * [1e-4] ).to(_A )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(_A , _A ).sample
UpperCAmelCase__ : Any = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCAmelCase__ : Tuple = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
model.to(_A )
UpperCAmelCase__ : str = 4
UpperCAmelCase__ : Any = 3
UpperCAmelCase__ : int = (32, 32)
UpperCAmelCase__ : Optional[Any] = torch.ones((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : Optional[Any] = torch.tensor(batch_size * [1e-4] ).to(_A )
with torch.no_grad():
UpperCAmelCase__ : int = model(_A , _A ).sample
UpperCAmelCase__ : Dict = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCAmelCase__ : Any = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
pass
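# Standalone sketch (ours): the first dummy config above stands up a model
# directly; `UNet2DModel` is the real diffusers name behind the mangled alias.
#
#   import torch
#   from diffusers import UNet2DModel
#   model = UNet2DModel(
#       sample_size=32, in_channels=3, out_channels=3, layers_per_block=2,
#       block_out_channels=(32, 64),
#       down_block_types=("DownBlock2D", "AttnDownBlock2D"),
#       up_block_types=("AttnUpBlock2D", "UpBlock2D"),
#   )
#   out = model(torch.randn(1, 3, 32, 32), timestep=torch.tensor([10])).sample
#   assert out.shape == (1, 3, 32, 32)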
| 299
| 0
|
'''simple docstring'''
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count n-digit positive integers that are also an n-th power."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
print(f"""{solution(1_0, 2_2) = }""")
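# Worked check (ours): 16807 = 7**5 has exactly 5 digits, so it counts, while
# any power of 10 has one digit too many. The default call yields the known
# total of 49 such n-digit n-th powers.
assert len(str(7**5)) == 5
assert len(str(10**2)) == 3
assert solution() == 49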
| 174
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase : Optional[Any] = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Union[str, Any] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Tuple = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Dict = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Tuple = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
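# Minimal stand-in (ours, not the transformers implementation) for the lazy
# pattern above: attribute access resolves to the submodule on first use, so
# heavy backends (torch/tf/flax) load only when actually needed.
#
#   import importlib, types
#
#   class _LazyDemo(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._map = {a: m for m, attrs in import_structure.items() for a in attrs}
#
#       def __getattr__(self, attr):
#           module = importlib.import_module("." + self._map[attr], self.__name__)
#           return getattr(module, attr)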
| 174
| 1
|
"""simple docstring"""
def power(base: int, exponent: int) -> float:
    """Recursively raise `base` to a non-negative `exponent`."""
    return base * power(base, (exponent - 1)) if exponent else 1
if __name__ == "__main__":
print("""Raise base to the power of exponent using recursion...""")
__UpperCAmelCase : Union[str, Any] = int(input("""Enter the base: """).strip())
__UpperCAmelCase : Tuple = int(input("""Enter the exponent: """).strip())
__UpperCAmelCase : Dict = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
__UpperCAmelCase : Tuple = 1 / result
print(F"""{base} to the power of {exponent} is {result}""")
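# Sketch (ours): exponentiation by squaring does the same job in O(log n)
# multiplications instead of the linear recursion above.
def fast_power(base: float, exponent: int) -> float:
    result = 1.0
    while exponent > 0:
        if exponent & 1:  # odd exponent: fold one factor into the result
            result *= base
        base *= base  # square the base, halve the exponent
        exponent >>= 1
    return result


assert fast_power(3, 13) == 3**13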
| 357
|
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
a : Optional[Any] = """pt"""
elif is_tf_available():
a : Union[str, Any] = """tf"""
else:
a : Any = """jax"""
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = PerceiverTokenizer
__lowerCamelCase = False
def UpperCAmelCase_ ( self ):
'''simple docstring'''
super().setUp()
lowercase__ : str= PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" )
def UpperCAmelCase_ ( self , **snake_case__ ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__=False , snake_case__=20 , snake_case__=5 ):
'''simple docstring'''
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
lowercase__ : Union[str, Any]= []
for i in range(len(snake_case__ ) ):
try:
lowercase__ : Any= tokenizer.decode([i] , clean_up_tokenization_spaces=snake_case__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowercase__ : int= list(filter(lambda snake_case__ : re.match(r"^[ a-zA-Z]+$" , t[1] ) , snake_case__ ) )
lowercase__ : Union[str, Any]= list(filter(lambda snake_case__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=snake_case__ ) , snake_case__ ) )
if max_length is not None and len(snake_case__ ) > max_length:
lowercase__ : int= toks[:max_length]
if min_length is not None and len(snake_case__ ) < min_length and len(snake_case__ ) > 0:
while len(snake_case__ ) < min_length:
lowercase__ : List[str]= toks + toks
# toks_str = [t[1] for t in toks]
lowercase__ : str= [t[0] for t in toks]
# Ensure consistency
lowercase__ : Optional[Any]= tokenizer.decode(snake_case__ , clean_up_tokenization_spaces=snake_case__ )
if " " not in output_txt and len(snake_case__ ) > 1:
lowercase__ : Dict= (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=snake_case__ )
+ " "
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=snake_case__ )
)
if with_prefix_space:
lowercase__ : List[Any]= " " + output_txt
lowercase__ : Any= tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
return output_txt, output_ids
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.perceiver_tokenizer
lowercase__ : Union[str, Any]= "Unicode €."
lowercase__ : Tuple= tokenizer(snake_case__ )
lowercase__ : Dict= [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded["input_ids"] , snake_case__ )
# decoding
lowercase__ : List[str]= tokenizer.decode(snake_case__ )
self.assertEqual(snake_case__ , "[CLS]Unicode €.[SEP]" )
lowercase__ : List[str]= tokenizer("e è é ê ë" )
lowercase__ : int= [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded["input_ids"] , snake_case__ )
# decoding
lowercase__ : Any= tokenizer.decode(snake_case__ )
self.assertEqual(snake_case__ , "[CLS]e è é ê ë[SEP]" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= self.perceiver_tokenizer
lowercase__ : Tuple= ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
lowercase__ : List[Any]= [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
lowercase__ : Dict= tokenizer(snake_case__ , padding=snake_case__ , return_tensors=snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
if FRAMEWORK != "jax":
lowercase__ : List[Any]= list(batch.input_ids.numpy()[0] )
else:
lowercase__ : str= list(batch.input_ids.tolist()[0] )
self.assertListEqual(snake_case__ , snake_case__ )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.perceiver_tokenizer
lowercase__ : int= ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowercase__ : Union[str, Any]= tokenizer(snake_case__ , padding=snake_case__ , return_tensors=snake_case__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids" , snake_case__ )
self.assertIn("attention_mask" , snake_case__ )
self.assertNotIn("decoder_input_ids" , snake_case__ )
self.assertNotIn("decoder_attention_mask" , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.perceiver_tokenizer
lowercase__ : int= [
"Summary of the text.",
"Another summary.",
]
lowercase__ : List[Any]= tokenizer(
text_target=snake_case__ , max_length=32 , padding="max_length" , truncation=snake_case__ , return_tensors=snake_case__ )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# safety check on max_len default value so we are sure the test works
lowercase__ : Union[str, Any]= self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowercase__ : List[str]= self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ : str= tempfile.mkdtemp()
lowercase__ : str= " He is very happy, UNwant\u00E9d,running"
lowercase__ : int= tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
tokenizer.save_pretrained(snake_case__ )
lowercase__ : List[str]= tokenizer.__class__.from_pretrained(snake_case__ )
lowercase__ : Tuple= after_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
shutil.rmtree(snake_case__ )
lowercase__ : List[str]= self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ : Tuple= tempfile.mkdtemp()
lowercase__ : List[Any]= " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
lowercase__ : Tuple= tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowercase__ : List[str]= tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
tokenizer.save_pretrained(snake_case__ )
lowercase__ : Tuple= tokenizer.__class__.from_pretrained(snake_case__ )
lowercase__ : Tuple= after_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowercase__ : Tuple= tokenizer.__class__.from_pretrained(snake_case__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(snake_case__ )
with open(os.path.join(snake_case__ , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
lowercase__ : Optional[Any]= json.load(snake_case__ )
with open(os.path.join(snake_case__ , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
lowercase__ : Optional[Any]= json.load(snake_case__ )
lowercase__ : List[str]= [F'''<extra_id_{i}>''' for i in range(125 )]
lowercase__ : Optional[int]= added_tokens_extra_ids + [
"an_additional_special_token"
]
lowercase__ : Union[str, Any]= added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(snake_case__ , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(snake_case__ , snake_case__ )
with open(os.path.join(snake_case__ , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(snake_case__ , snake_case__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowercase__ : Optional[int]= tokenizer_class.from_pretrained(
snake_case__ , )
self.assertIn(
"an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowercase__ : int= added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=snake_case__ )]
lowercase__ : int= tokenizer_class.from_pretrained(
snake_case__ , additional_special_tokens=snake_case__ , )
self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , "�" )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
lowercase__ : Optional[int]= self.get_tokenizers(fast=snake_case__ , do_lower_case=snake_case__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ : Optional[int]= ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
lowercase__ : Optional[int]= tokenizer.convert_tokens_to_string(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
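# Sketch (ours): Perceiver ids are UTF-8 bytes shifted past the special tokens
# ([CLS]=4, [SEP]=5, hence offset 6), which reproduces the expected ids above.
def _byte_encode_sketch(text, offset=6, cls_id=4, sep_id=5):
    return [cls_id] + [b + offset for b in text.encode("utf-8")] + [sep_id]


assert _byte_encode_sketch("Unicode €.") == [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]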
| 150
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case : Tuple = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Dict = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
_snake_case : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 284
|
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
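# Usage sketch (ours; model id illustrative): every scheduler here implements
# SchedulerMixin, so one can be swapped onto a pipeline via its config.
#
#   from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
#   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)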
| 284
| 1
|
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]
OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
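# The conversion is a single key rename in the state dict; in isolation (sketch, ours):
#
#   d = {OLD_KEY: torch.zeros(2, 2)}
#   d[NEW_KEY] = d.pop(OLD_KEY)  # same tensor object, new key
#   assert list(d) == [NEW_KEY]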
| 353
|
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class a ( UpperCAmelCase ):
_lowercase = "autoformer"
_lowercase = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self , A_ = None , A_ = None , A_ = "student_t" , A_ = "nll" , A_ = 1 , A_ = [1, 2, 3, 4, 5, 6, 7] , A_ = True , A_ = 0 , A_ = 0 , A_ = 0 , A_ = 0 , A_ = None , A_ = None , A_ = 64 , A_ = 2 , A_ = 2 , A_ = 2 , A_ = 2 , A_ = 32 , A_ = 32 , A_ = "gelu" , A_ = 0.1 , A_ = 0.1 , A_ = 0.1 , A_ = 0.1 , A_ = 0.1 , A_ = 100 , A_ = 0.02 , A_ = True , A_=True , A_ = 10 , A_ = 25 , A_ = 3 , **A_ , ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = prediction_length
_UpperCAmelCase : Dict = context_length if context_length is not None else prediction_length
_UpperCAmelCase : Tuple = distribution_output
_UpperCAmelCase : List[Any] = loss
_UpperCAmelCase : Optional[Any] = input_size
_UpperCAmelCase : int = num_time_features
_UpperCAmelCase : str = lags_sequence
_UpperCAmelCase : Union[str, Any] = scaling
_UpperCAmelCase : Union[str, Any] = num_dynamic_real_features
_UpperCAmelCase : int = num_static_real_features
_UpperCAmelCase : int = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(A_ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
_UpperCAmelCase : Union[str, Any] = cardinality
else:
_UpperCAmelCase : Any = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(A_ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
_UpperCAmelCase : int = embedding_dimension
else:
_UpperCAmelCase : int = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
_UpperCAmelCase : Union[str, Any] = num_parallel_samples
# Transformer architecture configuration
_UpperCAmelCase : Union[str, Any] = input_size * len(self.lags_sequence ) + self._number_of_features
_UpperCAmelCase : int = d_model
_UpperCAmelCase : Any = encoder_attention_heads
_UpperCAmelCase : str = decoder_attention_heads
_UpperCAmelCase : Union[str, Any] = encoder_ffn_dim
_UpperCAmelCase : Any = decoder_ffn_dim
_UpperCAmelCase : Any = encoder_layers
_UpperCAmelCase : Optional[int] = decoder_layers
_UpperCAmelCase : Optional[Any] = dropout
_UpperCAmelCase : Union[str, Any] = attention_dropout
_UpperCAmelCase : Dict = activation_dropout
_UpperCAmelCase : Dict = encoder_layerdrop
_UpperCAmelCase : int = decoder_layerdrop
_UpperCAmelCase : Union[str, Any] = activation_function
_UpperCAmelCase : str = init_std
_UpperCAmelCase : Tuple = use_cache
# Autoformer
_UpperCAmelCase : str = label_length
_UpperCAmelCase : Any = moving_average
_UpperCAmelCase : Optional[Any] = autocorrelation_factor
super().__init__(is_encoder_decoder=A_ , **A_ )
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
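# Instantiation sketch (ours; values illustrative) against the upstream class
# this snippet mirrors:
#
#   from transformers import AutoformerConfig, AutoformerModel
#   config = AutoformerConfig(prediction_length=24, context_length=48)
#   model = AutoformerModel(config)
#   print(config.d_model, config.feature_size)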
| 189
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a : str = logging.get_logger(__name__)
__a : int = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
__a : Any = '''wavlm'''
def __init__( self , lowerCAmelCase__=32 , lowerCAmelCase__=7_68 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=30_72 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-5 , lowerCAmelCase__="group" , lowerCAmelCase__="gelu" , lowerCAmelCase__=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowerCAmelCase__=(5, 2, 2, 2, 2, 2, 2) , lowerCAmelCase__=(10, 3, 3, 3, 3, 2, 2) , lowerCAmelCase__=False , lowerCAmelCase__=1_28 , lowerCAmelCase__=16 , lowerCAmelCase__=3_20 , lowerCAmelCase__=8_00 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=0.05 , lowerCAmelCase__=10 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0 , lowerCAmelCase__=10 , lowerCAmelCase__=3_20 , lowerCAmelCase__=2 , lowerCAmelCase__=0.1 , lowerCAmelCase__=1_00 , lowerCAmelCase__=2_56 , lowerCAmelCase__=2_56 , lowerCAmelCase__=0.1 , lowerCAmelCase__="mean" , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=2_56 , lowerCAmelCase__=(5_12, 5_12, 5_12, 5_12, 15_00) , lowerCAmelCase__=(5, 3, 3, 1, 1) , lowerCAmelCase__=(1, 2, 3, 1, 1) , lowerCAmelCase__=5_12 , lowerCAmelCase__=80 , lowerCAmelCase__=0 , lowerCAmelCase__=1 , lowerCAmelCase__=2 , lowerCAmelCase__=False , lowerCAmelCase__=3 , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> Tuple:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ , pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ )
__lowercase = hidden_size
__lowercase = feat_extract_norm
__lowercase = feat_extract_activation
__lowercase = list(lowerCAmelCase__ )
__lowercase = list(lowerCAmelCase__ )
__lowercase = list(lowerCAmelCase__ )
__lowercase = conv_bias
__lowercase = num_buckets
__lowercase = max_bucket_distance
__lowercase = num_conv_pos_embeddings
__lowercase = num_conv_pos_embedding_groups
__lowercase = len(self.conv_dim )
__lowercase = num_hidden_layers
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = num_attention_heads
__lowercase = hidden_dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = feat_proj_dropout
__lowercase = final_dropout
__lowercase = layerdrop
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = num_ctc_classes
__lowercase = vocab_size
__lowercase = do_stable_layer_norm
__lowercase = use_weighted_layer_sum
__lowercase = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowercase = apply_spec_augment
__lowercase = mask_time_prob
__lowercase = mask_time_length
__lowercase = mask_time_min_masks
__lowercase = mask_feature_prob
__lowercase = mask_feature_length
# parameters for pretraining with codevector quantized representations
__lowercase = num_codevectors_per_group
__lowercase = num_codevector_groups
__lowercase = contrastive_logits_temperature
__lowercase = num_negatives
__lowercase = codevector_dim
__lowercase = proj_codevector_dim
__lowercase = diversity_loss_weight
# ctc loss
__lowercase = ctc_loss_reduction
__lowercase = ctc_zero_infinity
# adapter
__lowercase = add_adapter
__lowercase = adapter_kernel_size
__lowercase = adapter_stride
__lowercase = num_adapter_layers
__lowercase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowercase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowercase = list(lowerCAmelCase__ )
__lowercase = list(lowerCAmelCase__ )
__lowercase = list(lowerCAmelCase__ )
__lowercase = xvector_output_dim
@property
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
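# Check (ours): the property above multiplies the conv strides, i.e. how many
# raw audio samples collapse into one output frame. With the defaults:
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320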
| 210
|
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__a : str = 1_6
__a : str = 3_2
def bamb(x):
    """Convert a byte count to MB (name kept as used throughout this script)."""
    return int(x / 2**20)
class TorchTracemalloc:
    """Context manager that reports CUDA memory deltas in MB."""

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def UpperCAmelCase ( lowercase , lowercase = 16 , lowercase = "bert-base-cased" , lowercase = 320 , lowercase = 160 , ):
"""simple docstring"""
__lowercase = AutoTokenizer.from_pretrained(lowercase )
__lowercase = load_dataset(
'''glue''' , '''mrpc''' , split={'''train''': F"train[:{n_train}]", '''validation''': F"validation[:{n_val}]"} )
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
__lowercase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowercase , max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__lowercase = datasets.map(
lowercase , batched=lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=lowercase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowercase = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(lowercase , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
__lowercase = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(tracemalloc.peaked + b2mb(tracemalloc.begin))
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
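
# A minimal launch sketch (assumptions: the standard `accelerate` CLI is
# installed and this script is saved as peak_memory_usage.py; the flag values
# are illustrative, not prescribed by the script itself):
#
#   accelerate launch peak_memory_usage.py \
#       --model_name_or_path bert-base-cased \
#       --n_train 320 --n_val 160 --num_epochs 1
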
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
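
# A minimal usage sketch (assumption: the "albert-base-v2" checkpoint is used
# only as an example; any SentencePiece vocab accepted by this class works):
#
#   tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#   ids = tokenizer("A quick test sentence.")["input_ids"]
#   print(tokenizer.convert_ids_to_tokens(ids))
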
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
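
# Note on the expected outputs above: Phobert's BPE marks every non-final
# subword piece with a trailing "@@" (e.g. "T@@", "R@@"), so a word is rebuilt
# by concatenating pieces until one without the marker is reached. Pieces that
# are missing from the toy vocabulary map to the unknown-token id (3 here).
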
"""simple docstring"""
from manim import *
class DiskOffloadAnimation(Scene):
    # NOTE: the original identifiers and direction/color constants were destroyed;
    # the names and constants below are a self-consistent reconstruction.
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        model_meta_arr = []

        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr, *model_meta_arr)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        checkpoint.move_to([3, 0.5, 0])
        self.add(checkpoint)

        ckpt_arr = []
        ckpt_cpu_arr = []

        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)

            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key, key_text)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint", font_size=18
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            "Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4.0, -1.25, 0])
        self.play(Write(step_1, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1))

        animations = []
        for i, rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))
        self.play(*animations)

        self.play(FadeOut(step_1))

        step_2 = MarkupText("Then, the checkpoint is removed from memory\nthrough garbage collection.", font_size=24)
        step_2.move_to([2, 2, 0])

        self.play(Write(step_2, run_time=3))

        self.play(
            FadeOut(key, key_text, *ckpt_arr, *ckpt_cpu_arr),
        )

        self.wait()
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length,
        vocab_size,
        d_model,
        dropout_rate,
        num_layers,
        num_heads,
        d_kv,
        d_ff,
        feed_forward_proj,
        is_decoder=False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
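
# A minimal forward-pass sketch (assumption: the hyper-parameters below are
# illustrative toy values, not those of any released checkpoint):
#
#   encoder = SpectrogramNotesEncoder(
#       max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
#       num_layers=12, num_heads=12, d_kv=64, d_ff=2048,
#       feed_forward_proj="gated-gelu",
#   )
#   tokens = torch.randint(0, 1536, (1, 2048))
#   mask = torch.ones(1, 2048, dtype=torch.bool)
#   hidden, mask = encoder(tokens, mask)
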
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}


class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
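
# A minimal usage sketch (the printed values are the config defaults):
#
#   config = TableTransformerConfig()
#   print(config.d_model, config.num_attention_heads)  # 256 8
#   onnx_config = TableTransformerOnnxConfig(config)
#   print(dict(onnx_config.inputs))
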
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
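
# A small worked example (the three-node tree is illustrative): the root holds
# 3 coins and both leaves hold 0, so one coin must move to each leaf, giving
# 2 moves in total.
#
#   root = TreeNode(3, TreeNode(0), TreeNode(0))
#   print(distribute_coins(root))  # 2
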
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}


def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
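
# Example invocation (tokenizer, checkpoint and output path are illustrative):
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers
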
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
    "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
    "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
    "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
    "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
    "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}


class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)


class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
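
# A minimal usage sketch (assumption: only the ONNX input spec is inspected
# here; no export is actually performed):
#
#   config = BloomConfig(n_layer=2, n_head=8, hidden_size=64)
#   onnx_config = BloomOnnxConfig(config, use_past=True)
#   print(dict(onnx_config.inputs))
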
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consists of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in the dictionary
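
# Added usage sketch (not part of the original tests): the retrieval these
# tests exercise reduces to maximum inner product search over the document
# embeddings. A dependency-free reproduction with the same dummy data:
if __name__ == "__main__":
    doc_embeddings = np.stack([np.ones(8), 2 * np.ones(8)])  # docs "0" and "1"
    queries = np.array([np.ones(8), -np.ones(8)], dtype=np.float32)
    scores = queries @ doc_embeddings.T  # inner products, shape (2, 2)
    print(scores.argmax(axis=1))  # [1 0] -- matches the doc_ids asserted above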
| 189
| 0
|
"""simple docstring"""
def UpperCAmelCase ( ):
'''simple docstring'''
return 1
def UpperCAmelCase ( a_ ):
'''simple docstring'''
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def UpperCAmelCase ( a_ ):
'''simple docstring'''
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(a_ )
def UpperCAmelCase ( a_ ):
'''simple docstring'''
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(a_ )
def UpperCAmelCase ( a_ ):
'''simple docstring'''
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(a_ )
def UpperCAmelCase ( a_ ):
'''simple docstring'''
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(a_ )
def UpperCAmelCase ( a_ ):
'''simple docstring'''
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(a_ )
def UpperCAmelCase ( a_ ):
'''simple docstring'''
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(a_ )
def UpperCAmelCase ( a_ = 200 ):
'''simple docstring'''
return two_pound(a_ )
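
# Added cross-check (not in the original file): the same count via an
# iterative dynamic program over the denominations; for the default target of
# 200 pence both approaches yield 73682, the known answer to Problem 31.
def solution_dp(pence: int = 200) -> int:
    coins = (1, 2, 5, 10, 20, 50, 100, 200)
    ways = [1] + [0] * pence  # one way to make 0 pence: use no coins
    for coin in coins:
        for value in range(coin, pence + 1):
            ways[value] += ways[value - coin]
    return ways[pence]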
if __name__ == "__main__":
print(solution(int(input().strip())))
| 367
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith('module.encoder'):
            key = key.replace('module.encoder', 'glpn.encoder')
        if key.startswith('module.decoder'):
            key = key.replace('module.decoder', 'decoder.stages')
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('patch_embed') + len('patch_embed')]
            key = key.replace(f'patch_embed{idx}', f'patch_embeddings.{int(idx)-1}')
        if "norm" in key:
            key = key.replace('norm', 'layer_norm')
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('glpn.encoder.layer_norm') + len('glpn.encoder.layer_norm')]
            key = key.replace(f'layer_norm{idx}', f'layer_norm.{int(idx)-1}')
        if "layer_norm1" in key:
            key = key.replace('layer_norm1', 'layer_norm_1')
        if "layer_norm2" in key:
            key = key.replace('layer_norm2', 'layer_norm_2')
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('block') + len('block')]
            key = key.replace(f'block{idx}', f'block.{int(idx)-1}')
        if "attn.q" in key:
            key = key.replace('attn.q', 'attention.self.query')
        if "attn.proj" in key:
            key = key.replace('attn.proj', 'attention.output.dense')
        if "attn" in key:
            key = key.replace('attn', 'attention.self')
        if "fc1" in key:
            key = key.replace('fc1', 'dense1')
        if "fc2" in key:
            key = key.replace('fc2', 'dense2')
        if "linear_pred" in key:
            key = key.replace('linear_pred', 'classifier')
        if "linear_fuse" in key:
            key = key.replace('linear_fuse.conv', 'linear_fuse')
            key = key.replace('linear_fuse.bn', 'batch_norm')
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('linear_c') + len('linear_c')]
            key = key.replace(f'linear_c{idx}', f'linear_c.{int(idx)-1}')
        if "bot_conv" in key:
            key = key.replace('bot_conv', '0.convolution')
        if "skip_conv1" in key:
            key = key.replace('skip_conv1', '1.convolution')
        if "skip_conv2" in key:
            key = key.replace('skip_conv2', '2.convolution')
        if "fusion1" in key:
            key = key.replace('fusion1', '1.fusion')
        if "fusion2" in key:
            key = key.replace('fusion2', '2.fusion')
        if "fusion3" in key:
            key = key.replace('fusion3', '3.fusion')
        if "fusion" in key and "conv" in key:
            key = key.replace('conv', 'convolutional_layer')
        if key.startswith('module.last_layer_depth'):
            key = key.replace('module.last_layer_depth', 'head.head')
        new_state_dict[key] = value

    return new_state_dict


def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f'glpn.encoder.block.{i}.{j}.attention.self.kv.weight')
            kv_bias = state_dict.pop(f'glpn.encoder.block.{i}.{j}.attention.self.kv.bias')
            # next, add keys and values (in that order) to the state dict
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.key.weight'] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.key.bias'] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.value.weight'] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.value.bias'] = kv_bias[config.hidden_sizes[i] :]


def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values

    logger.info('Converting model...')

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f'Unknown model name: {model_name}')

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print('Looks ok!')

    # finally, push to hub if required
    if push_to_hub:
        logger.info('Pushing model and image processor to the hub...')
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization='nielsr',
            commit_message='Add model',
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization='nielsr',
            commit_message='Add image processor',
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--checkpoint_path',
        default=None,
        type=str,
        help='Path to the original PyTorch checkpoint (.pth file).',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
    )
    parser.add_argument(
        '--model_name',
        default='glpn-kitti',
        type=str,
        help='Name of the model in case you\'re pushing to the hub.',
    )

    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
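
# Example invocation (added; the checkpoint path below is a placeholder, not
# shipped with the script):
#
#   python convert_glpn_to_pytorch.py \
#       --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti-hf \
#       --model_name glpn-kitti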
| 205
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            'task_specific_params': {
                'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
                'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
                'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
            }
        }
        expected_dict = {
            'task_specific_params.summarization.length_penalty': 1.0,
            'task_specific_params.summarization.max_length': 128,
            'task_specific_params.summarization.min_length': 12,
            'task_specific_params.summarization.num_beams': 4,
            'task_specific_params.summarization_cnn.length_penalty': 2.0,
            'task_specific_params.summarization_cnn.max_length': 142,
            'task_specific_params.summarization_cnn.min_length': 56,
            'task_specific_params.summarization_cnn.num_beams': 4,
            'task_specific_params.summarization_xsum.length_penalty': 1.0,
            'task_specific_params.summarization_xsum.max_length': 62,
            'task_specific_params.summarization_xsum.min_length': 11,
            'task_specific_params.summarization_xsum.num_beams': 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)
    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))
    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))
    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))
    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
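
# Added usage sketch (not part of the original tests): the helpers above are
# framework-agnostic and dispatch on the tensor type they receive, e.g. with
# plain NumPy arrays:
if __name__ == "__main__":
    x = np.random.randn(1, 3, 4)
    print(transpose(x).shape)            # (4, 3, 1)
    print(reshape(x, (4, 3)).shape)      # (4, 3)
    print(squeeze(x).shape)              # (3, 4)
    print(expand_dims(x, axis=0).shape)  # (1, 1, 3, 4)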
| 90
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, hidden_sizes=[32, 64, 128], depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2"], out_indices=[1, 2]):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, hidden_sizes=self.hidden_sizes, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices)
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason='FocalNet does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='FocalNet does not use feedforward chunking')
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, 'expected_num_hidden_layers', len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny').to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)


@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
| 90
| 1
|
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
    from nltk import word_tokenize


_CITATION = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'

_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'

_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    \'meteor\': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric(\'meteor\')\n    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results["meteor"], 4))\n    0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            # newer nltk versions expect pre-tokenized input
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 356
|
"""simple docstring"""
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    # returns a zero vector of the given dimension
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    # returns a unit basis vector with a one at index 'pos' (zero-based)
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    # computes the axpy operation: scalar * x + y
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    # returns a random vector of size n with integer components between 'a' and 'b'
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class UpperCamelCase_ :
def __init__( self : Optional[Any] , lowerCAmelCase_ : list[list[float]] , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> None:
UpperCAmelCase_ : List[Any] = matrix
UpperCAmelCase_ : List[Any] = w
UpperCAmelCase_ : List[Any] = h
def __str__( self : int ) -> str:
UpperCAmelCase_ : Tuple = ""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : Any , lowerCAmelCase_ : Matrix ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
UpperCAmelCase_ : List[Any] = []
for i in range(self.__height ):
UpperCAmelCase_ : Optional[Any] = [
self.__matrix[i][j] + other.component(lowerCAmelCase_ , lowerCAmelCase_ )
for j in range(self.__width )
]
matrix.append(lowerCAmelCase_ )
return Matrix(lowerCAmelCase_ , self.__width , self.__height )
else:
raise Exception("matrix must have the same dimension!" )
def __sub__( self : Optional[int] , lowerCAmelCase_ : Matrix ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
UpperCAmelCase_ : Union[str, Any] = []
for i in range(self.__height ):
UpperCAmelCase_ : Union[str, Any] = [
self.__matrix[i][j] - other.component(lowerCAmelCase_ , lowerCAmelCase_ )
for j in range(self.__width )
]
matrix.append(lowerCAmelCase_ )
return Matrix(lowerCAmelCase_ , self.__width , self.__height )
else:
raise Exception("matrices must have the same dimension!" )
    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    summands = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(summands))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!")
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None
    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")
    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
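# Usage sketch (illustrative, not part of the library): exercises the
# classes above, assuming the Vector constructor and arithmetic operators
# defined earlier in this file; the expected values follow directly from
# the definitions.
if __name__ == "__main__":
    x = Vector([1, 2, 3])
    y = Vector([1, 1, 1])
    print(axpy(2, x, y))  # 2*x + y -> (3, 5, 7)
    m = Matrix([[1, 2], [3, 4]], 2, 2)
    print(m.determinant())  # 1*4 - 2*3 = -2
    print(m.height(), m.width())  # 2 2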
| 253
| 0
|
from manim import *
class ModelMemoryAnimation(Scene):  # hypothetical class name; a manim animation must subclass Scene
    '''simple docstring'''
    def construct(self) -> None:
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)
UpperCAmelCase_ : Dict = [mem.copy() for i in range(6 )]
UpperCAmelCase_ : List[str] = [mem.copy() for i in range(6 )]
UpperCAmelCase_ : List[Any] = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
UpperCAmelCase_ : List[Any] = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
UpperCAmelCase_ : int = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
UpperCAmelCase_ : Optional[Any] = Text('CPU' , font_size=2_4 )
UpperCAmelCase_ : Union[str, Any] = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowerCAmelCase )
UpperCAmelCase_ : str = [mem.copy() for i in range(4 )]
UpperCAmelCase_ : List[str] = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
UpperCAmelCase_ : Any = Text('GPU' , font_size=2_4 )
UpperCAmelCase_ : int = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__lowerCAmelCase )
UpperCAmelCase_ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase_ : int = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
UpperCAmelCase_ : Any = Text('Model' , font_size=2_4 )
UpperCAmelCase_ : Optional[int] = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__lowerCAmelCase )
UpperCAmelCase_ : str = []
UpperCAmelCase_ : Any = []
for i, rect in enumerate(__lowerCAmelCase ):
UpperCAmelCase_ : Union[str, Any] = fill.copy().set_fill(__lowerCAmelCase , opacity=0.8 )
target.move_to(__lowerCAmelCase )
model_arr.append(__lowerCAmelCase )
UpperCAmelCase_ : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__lowerCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__lowerCAmelCase )
self.add(*__lowerCAmelCase , *__lowerCAmelCase )
UpperCAmelCase_ : str = [meta_mem.copy() for i in range(6 )]
UpperCAmelCase_ : Tuple = [meta_mem.copy() for i in range(6 )]
UpperCAmelCase_ : Dict = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
UpperCAmelCase_ : int = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
UpperCAmelCase_ : Any = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
UpperCAmelCase_ : str = Text('Disk' , font_size=2_4 )
UpperCAmelCase_ : List[str] = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
disk.move_to([-4, -1.25, 0] )
self.add(__lowerCAmelCase , __lowerCAmelCase )
UpperCAmelCase_ : List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase_ : List[Any] = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowerCAmelCase , __lowerCAmelCase )
UpperCAmelCase_ : List[str] = MarkupText(
f"<span fgcolor=\'{BLUE}\'>●</span> Checkpoint" , font_size=1_8 , )
blue_text.next_to(__lowerCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__lowerCAmelCase )
UpperCAmelCase_ : Any = MarkupText(
f"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCAmelCase ) )
UpperCAmelCase_ : Tuple = Square(0.3 )
input.set_fill(__lowerCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , __lowerCAmelCase , buff=0.5 )
self.play(Write(__lowerCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=__lowerCAmelCase , buff=0.02 )
self.play(MoveToTarget(__lowerCAmelCase ) )
self.play(FadeOut(__lowerCAmelCase ) )
UpperCAmelCase_ : List[str] = Arrow(start=__lowerCAmelCase , end=__lowerCAmelCase , color=__lowerCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , __lowerCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
UpperCAmelCase_ : Any = MarkupText(
f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCAmelCase , run_time=3 ) )
UpperCAmelCase_ : Union[str, Any] = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__lowerCAmelCase ) , Circumscribe(model_arr[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
UpperCAmelCase_ : str = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , __lowerCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
UpperCAmelCase_ : Dict = AnimationGroup(
FadeOut(__lowerCAmelCase , run_time=0.5 ) , MoveToTarget(__lowerCAmelCase , run_time=0.5 ) , FadeIn(__lowerCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(__lowerCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
UpperCAmelCase_ : Union[str, Any] = 0.7
self.play(
Circumscribe(model_arr[i] , **__lowerCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **__lowerCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(model_arr[i + 1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
UpperCAmelCase_ : Dict = a_c
UpperCAmelCase_ : Union[str, Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(__lowerCAmelCase ) , FadeOut(__lowerCAmelCase , run_time=0.5 ) , )
UpperCAmelCase_ : int = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed." , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCAmelCase , run_time=3 ) , MoveToTarget(__lowerCAmelCase ) )
self.wait()
| 29
|
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """simple docstring"""
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()
    if not result:
        result.append("timed out")
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    """simple docstring"""
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")
        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that raises when it is read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """simple docstring"""
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if platform.uname().system != "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))
    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
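# Usage sketch (illustrative): how the sandbox above is typically driven.
# Runs one candidate program against its embedded test with a small time
# budget; the parameter order follows the check_correctness signature above,
# and the call is guarded for multiprocessing on spawn-based platforms.
if __name__ == "__main__":
    program = "def add(a, b):\n    return a + b\n\nassert add(2, 2) == 4\n"
    print(check_correctness(program, timeout=3.0, task_id="demo/0", completion_id=0))
    # -> {'task_id': 'demo/0', 'passed': True, 'result': 'passed', 'completion_id': 0}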
| 274
| 0
|
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 22
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
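# Note (illustrative): _LazyModule defers the heavy torch imports until an
# attribute is first accessed, so importing this package stays cheap:
#
#     import transformers.models.time_series_transformer as tst  # no torch import yet
#     cfg = tst.TimeSeriesTransformerConfig()  # first attribute access loads the real submodule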
| 22
| 1
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    '''simple docstring'''
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
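# Usage sketch (hypothetical, for older `datasets` releases that shipped
# task templates): casting a dataset to this template maps its columns
# onto the "text"/"summary" schema.
#
#     from datasets import load_dataset
#     ds = load_dataset("xsum", split="train[:10]")  # has "document"/"summary" columns
#     task = Summarization(text_column="document")   # frozen dataclass: set fields at construction
#     prepared = ds.prepare_for_task(task)           # columns renamed to "text"/"summary"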
| 111
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 111
| 1
|
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
raise ValueError(f"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""")
if all_paths != sorted(all_paths):
raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 149
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 149
| 1
|
import os
import sys
import unittest
UpperCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
UpperCAmelCase__ = os.path.join(git_repo_path, "src", "transformers")
UpperCAmelCase__ = "\n{0} = None\n"
UpperCAmelCase__ = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
UpperCAmelCase__ = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)
        simple_backend = find_backend(' if not is_tokenizers_available():')
        self.assertEqual(simple_backend, 'tokenizers')
        backend_with_underscore = find_backend(' if not is_tensorflow_text_available():')
        self.assertEqual(backend_with_underscore, 'tensorflow_text')
        double_backend = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):')
        self.assertEqual(double_backend, 'sentencepiece_and_tokenizers')
        double_backend_with_underscore = find_backend(
            ' if not (is_sentencepiece_available() and is_tensorflow_text_available()):')
        self.assertEqual(double_backend_with_underscore, 'sentencepiece_and_tensorflow_text')
        triple_backend = find_backend(
            ' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):')
        self.assertEqual(triple_backend, 'sentencepiece_and_tokenizers_and_vision')

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch', objects)
        self.assertIn('tensorflow_text', objects)
        self.assertIn('sentencepiece_and_tokenizers', objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn('BertModel', objects['torch'])
        self.assertIn('TFBertModel', objects['tf'])
        self.assertIn('FlaxBertModel', objects['flax'])
        self.assertIn('BertModel', objects['torch'])
        self.assertIn('TFBertTokenizer', objects['tensorflow_text'])
        self.assertIn('convert_slow_tokenizer', objects['sentencepiece_and_tokenizers'])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object('CONSTANT', '\'torch\'')
        self.assertEqual(dummy_constant, '\nCONSTANT = None\n')
        dummy_function = create_dummy_object('function', '\'torch\'')
        self.assertEqual(
            dummy_function, '\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n')
        expected_dummy_class = '\nclass FakeClass(metaclass=DummyObject):\n    _backends = \'torch\'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, \'torch\')\n'
        dummy_class = create_dummy_object('FakeClass', '\'torch\'')
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = ["torch"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, ["torch"])\n'
        dummy_files = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']})
        self.assertEqual(dummy_files['torch'], expected_dummy_pytorch)
| 339
|
import os
def solution(filename: str = "input.txt") -> int:
    """simple docstring"""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(',')]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j])
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j])
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(f"{solution() = }")
| 205
| 0
|
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level() -> int:
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level
def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
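# Usage sketch (illustrative): how client code drives the helpers above.
#
#     logger = get_logger(__name__)   # child of the library root logger
#     set_verbosity_debug()           # or set_verbosity(log_levels["debug"])
#     logger.debug("now visible")
#     disable_progress_bar()          # tqdm(...) returns a no-op EmptyTqdm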
| 260
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}
    def setUp(self):
        super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def roberta_dict_integration_testing(self):
'''simple docstring'''
_snake_case : Any = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=UpperCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=UpperCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
    def test_sequence_builders(self):
'''simple docstring'''
_snake_case : Dict = self.tokenizer_class.from_pretrained('roberta-base' )
_snake_case : Tuple = tokenizer.encode('sequence builders' , add_special_tokens=UpperCamelCase )
_snake_case : int = tokenizer.encode('multi-sequence build' , add_special_tokens=UpperCamelCase )
_snake_case : Dict = tokenizer.encode(
'sequence builders' , add_special_tokens=UpperCamelCase , add_prefix_space=UpperCamelCase )
_snake_case : Optional[int] = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=UpperCamelCase , add_prefix_space=UpperCamelCase )
_snake_case : List[str] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase )
_snake_case : Tuple = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_tokenizer()
_snake_case : int = 'Encode this sequence.'
_snake_case : str = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
_snake_case : int = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase , add_prefix_space=UpperCamelCase )
_snake_case : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(UpperCamelCase , UpperCamelCase )
_snake_case : Optional[int] = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase , add_prefix_space=UpperCamelCase )
_snake_case : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(UpperCamelCase , UpperCamelCase )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
_snake_case : List[Any] = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
_snake_case : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(UpperCamelCase , UpperCamelCase )
# Testing spaces after special tokens
_snake_case : Dict = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase )} ) # mask token has a left space
_snake_case : int = tokenizer.convert_tokens_to_ids(UpperCamelCase )
_snake_case : List[Any] = 'Encode <mask> sequence'
_snake_case : Any = 'Encode <mask>sequence'
_snake_case : Optional[int] = tokenizer.encode(UpperCamelCase )
_snake_case : str = encoded.index(UpperCamelCase )
_snake_case : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(UpperCamelCase , UpperCamelCase )
_snake_case : Tuple = tokenizer.encode(UpperCamelCase )
_snake_case : Tuple = encoded.index(UpperCamelCase )
_snake_case : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(UpperCamelCase , UpperCamelCase )
    def test_pretokenized_inputs(self):
'''simple docstring'''
pass
    def test_embeded_special_tokens(self):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case : Any = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
_snake_case : Union[str, Any] = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
_snake_case : Tuple = 'A, <mask> AllenNLP sentence.'
_snake_case : str = tokenizer_r.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase )
_snake_case : Optional[int] = tokenizer_p.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
_snake_case : Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
_snake_case : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
UpperCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
_snake_case : int = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
_snake_case : Dict = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , UpperCamelCase )
self.assertEqual(post_processor_state['add_prefix_space'] , UpperCamelCase )
self.assertEqual(post_processor_state['trim_offsets'] , UpperCamelCase )
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_args(self):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case : List[str] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
_snake_case : Tuple = f"""{text_of_1_token} {text_of_1_token}"""
_snake_case : int = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : int = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase ) + 1, len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
_snake_case : List[str] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : Tuple = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase ) + 1, len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
_snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : Optional[int] = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase ), len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
_snake_case : Any = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : Tuple = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase ), len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
_snake_case : str = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_snake_case : List[str] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : Dict = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase ) + 1, 1 + len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
_snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : str = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase ), 1 + len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
_snake_case : List[str] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : Any = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase ), 1 + len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
| 260
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
"LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongT5EncoderModel",
"LongT5ForConditionalGeneration",
"LongT5Model",
"LongT5PreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
"FlaxLongT5ForConditionalGeneration",
"FlaxLongT5Model",
"FlaxLongT5PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66
|
import os
def solution(filename: str = "matrix.txt") -> int:
    """simple docstring"""
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()
    grid = [[int(cell) for cell in row.split(',')] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]
if __name__ == "__main__":
print(F'{solution() = }')
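# Sanity check (illustrative): the same DP on the 5x5 example grid from the
# Project Euler 81 statement, whose minimal right/down path sum is 2427;
# inlined here to avoid the matrix.txt dependency.
def _check_example() -> None:
    grid = [
        [131, 673, 234, 103, 18],
        [201, 96, 342, 965, 150],
        [630, 803, 746, 422, 111],
        [537, 699, 497, 121, 956],
        [805, 732, 524, 37, 331],
    ]
    n = len(grid)
    dp = [[0] * n for _ in range(n)]
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    assert dp[-1][-1] == 2427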
| 253
| 0
|
def nor_gate(input_1: int, input_2: int) -> int:
    return int(input_1 == input_2 == 0)


def main() -> None:
print('Truth Table of NOR Gate:' )
print('| Input 1 | Input 2 | Output |' )
print(F'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
print(F'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
print(F'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
print(F'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 14
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
                 hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
                 eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, word_embed_proj_dim=16):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        config = self.config_cls(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=False, **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFOPTModel, 'text-generation': TFOPTForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_pruning = False
    test_missing_keys = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, 'weight'):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, 'weight'):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)
                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)
                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)
                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int64) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2,
            ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained('facebook/opt-350m')
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]])
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))
        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = 'facebook/opt-350m'

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)
        prompts = [
            'Today is a beautiful day and I want to',
            'In the city of',
            'Paris is the capital of France and',
            'Computers and mobile phones have taken',
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors='tf', padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ])
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
    def test_generation_pre_attn_layer_norm(self):
        model_id = 'facebook/opt-125m'
        expected_outputs = [
            'Today is a beautiful day and I want to',
            'In the city of New York, the city',
            'Paris is the capital of France and the capital',
            'Computers and mobile phones have taken over the',
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors='tf').input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, expected_outputs)
    def test_batch_generation(self):
        model_id = 'facebook/opt-350m'
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = 'left'
        # use different length sentences to test batching
        sentences = [
            'Hello, my dog is a little',
            'Today, I',
        ]
        inputs = tokenizer(sentences, return_tensors='tf', padding=True)
        input_ids = inputs['input_ids']
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs['attention_mask'])
        inputs_non_padded = tokenizer(sentences[0], return_tensors='tf').input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs['attention_mask'][-1], tf.int64))
        inputs_padded = tokenizer(sentences[1], return_tensors='tf').input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
            'Today, I was in the middle of a conversation with a friend about the',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence])
def _lowercase ( self ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = 'facebook/opt-350m'
lowerCAmelCase_ = [
'Today is a beautiful day and I want to',
'In the city of San Francisco, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
lowerCAmelCase_ = []
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ )
for prompt in self.prompts:
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(lowercase_ , max_length=1_0 )
lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
predicted_outputs += generated_string
self.assertListEqual(lowercase_ , lowercase_ )
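# Note on the batching test above: decoder-only models such as OPT are padded on
# the left so that newly generated tokens directly follow the prompt; with right
# padding the shorter prompt's continuation would start after pad tokens and the
# padded and non-padded generations would no longer match.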
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    """Map an activation-function name to the matching ``torch.nn`` module."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
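# Hypothetical usage sketch (the `get_activation` name above is reconstructed,
# not confirmed by the original snippet):
#   act = get_activation("silu")      # -> nn.SiLU()
#   y = act(torch.randn(2, 4))        # applied elementwise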
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
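# For reference, a minimal sketch of the splitting behavior the parametrized
# cases above pin down (an illustrative reimplementation, not the actual
# `datasets.utils.sharding._distribute_shards` source): shard indices are split
# into at most `max_num_jobs` contiguous ranges, earlier jobs take one extra
# shard when the division is uneven, and empty ranges are dropped.
def _distribute_shards_sketch(num_shards: int, max_num_jobs: int) -> list:
    shards_per_job = [num_shards // max_num_jobs] * max_num_jobs
    for i in range(num_shards % max_num_jobs):
        shards_per_job[i] += 1  # spread the remainder over the first jobs
    out, start = [], 0
    for count in shards_per_job:
        if count > 0:
            out.append(range(start, start + count))
            start += count
    return out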
def partition(m: int) -> int:
    """Count the integer partitions of m using dynamic programming."""
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
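# Worked example: partition(5) == 7, counting 5, 4+1, 3+2, 3+1+1, 2+2+1,
# 2+1+1+1 and 1+1+1+1+1.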
if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
import logging
import random

import ray

from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex


logger = logging.getLogger(__name__)


class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed model parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    """Map an activation-function name to the matching ``torch.nn`` module."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase="attention" ):
'''simple docstring'''
__lowerCAmelCase = __lowerCAmelCase = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :] )
__lowerCAmelCase = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
__lowerCAmelCase = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :] )
__lowerCAmelCase = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
__lowerCAmelCase = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :] )
__lowerCAmelCase = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
__lowerCAmelCase = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :] )
__lowerCAmelCase = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ):
'''simple docstring'''
if split_mlp_wi:
__lowerCAmelCase = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
__lowerCAmelCase = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
__lowerCAmelCase = (wi_a, wi_a)
else:
__lowerCAmelCase = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
__lowerCAmelCase = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
return wi, wo
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    """Converts the flattened T5X parameter dict into HF Transformers key names.

    The target key names below are reconstructed from the standard T5/UMT5
    conversion layout; the originals were lost in this copy of the script.
    """
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = (
                    t5x_relpos_bias_lookup(old, i, "decoder").T
                )

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    """Turns the converted numpy params into a torch state dict, tying embeddings as needed."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Loads a T5X checkpoint into the given PyTorch model."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False
):
    """Converts a T5X checkpoint to a PyTorch checkpoint and saves it."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from the T5X checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder only", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
A : Optional[int] = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)
        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])
        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )
        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]
            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")
            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 2, len(outputs))
            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def _lowerCamelCase ( _UpperCamelCase="train-batch.pt" ):
'''simple docstring'''
__lowerCAmelCase = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=_UpperCamelCase , repo_type="dataset" )
__lowerCAmelCase = torch.load(_UpperCamelCase , map_location=_UpperCamelCase )
return batch
@require_torch
@slow
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self ):
__lowerCAmelCase = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(__a )
__lowerCAmelCase = prepare_batch()
with torch.no_grad():
__lowerCAmelCase = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
__lowerCAmelCase = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , __a )
__lowerCAmelCase = torch.tensor(
[[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def snake_case ( self ):
__lowerCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(__a )
__lowerCAmelCase = prepare_batch("val-batch.pt" )
with torch.no_grad():
__lowerCAmelCase = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
__lowerCAmelCase = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , __a )
__lowerCAmelCase = torch.tensor(
[[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def snake_case ( self ):
__lowerCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(__a )
__lowerCAmelCase = prepare_batch("val-batch.pt" )
with torch.no_grad():
__lowerCAmelCase = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
__lowerCAmelCase = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , __a )
__lowerCAmelCase = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=__a )
__lowerCAmelCase = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __a , rtol=1e-1 ) )
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : list ):
'''simple docstring'''
for i in range(len(_SCREAMING_SNAKE_CASE ) - 1 , 0 , -1 ):
_UpperCAmelCase = False
for j in range(_SCREAMING_SNAKE_CASE , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
_UpperCAmelCase , _UpperCAmelCase = unsorted[j - 1], unsorted[j]
_UpperCAmelCase = True
for j in range(_SCREAMING_SNAKE_CASE ):
if unsorted[j] > unsorted[j + 1]:
_UpperCAmelCase , _UpperCAmelCase = unsorted[j + 1], unsorted[j]
_UpperCAmelCase = True
if not swapped:
break
return unsorted
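# Complexity note: worst case is O(n^2) comparisons, like bubble sort; the
# `swapped` early exit makes an already-sorted input O(n).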
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
"""simple docstring"""
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
_UpperCAmelCase = 6
_UpperCAmelCase = 1
_UpperCAmelCase = 1901
_UpperCAmelCase = 0
while year < 2001:
day += 7
if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
_UpperCAmelCase = day - days_per_month[month - 2]
elif day > 29 and month == 2:
month += 1
_UpperCAmelCase = day - 29
else:
if day > days_per_month[month - 1]:
month += 1
_UpperCAmelCase = day - days_per_month[month - 2]
if month > 12:
year += 1
_UpperCAmelCase = 1
if year < 2001 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution())
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : List[str] = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UpperCamelCase_ ( a_ ):
_A : Dict = 'unispeech'
def __init__( self , snake_case__=32 , snake_case__=7_68 , snake_case__=12 , snake_case__=12 , snake_case__=30_72 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.02 , snake_case__=1e-5 , snake_case__="group" , snake_case__="gelu" , snake_case__=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , snake_case__=(5, 2, 2, 2, 2, 2, 2) , snake_case__=(10, 3, 3, 3, 3, 2, 2) , snake_case__=False , snake_case__=1_28 , snake_case__=16 , snake_case__=False , snake_case__=True , snake_case__=0.05 , snake_case__=10 , snake_case__=2 , snake_case__=0.0 , snake_case__=10 , snake_case__=0 , snake_case__=3_20 , snake_case__=2 , snake_case__=0.1 , snake_case__=1_00 , snake_case__=2_56 , snake_case__=2_56 , snake_case__=0.1 , snake_case__="mean" , snake_case__=False , snake_case__=False , snake_case__=2_56 , snake_case__=80 , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=0.5 , **snake_case__ , ) -> Dict:
"""simple docstring"""
super().__init__(**snake_case__ , pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ )
UpperCAmelCase = hidden_size
UpperCAmelCase = feat_extract_norm
UpperCAmelCase = feat_extract_activation
UpperCAmelCase = list(snake_case__ )
UpperCAmelCase = list(snake_case__ )
UpperCAmelCase = list(snake_case__ )
UpperCAmelCase = conv_bias
UpperCAmelCase = num_conv_pos_embeddings
UpperCAmelCase = num_conv_pos_embedding_groups
UpperCAmelCase = len(self.conv_dim )
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_dropout
UpperCAmelCase = attention_dropout
UpperCAmelCase = activation_dropout
UpperCAmelCase = feat_proj_dropout
UpperCAmelCase = final_dropout
UpperCAmelCase = layerdrop
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = initializer_range
UpperCAmelCase = num_ctc_classes
UpperCAmelCase = vocab_size
UpperCAmelCase = do_stable_layer_norm
UpperCAmelCase = use_weighted_layer_sum
UpperCAmelCase = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase = apply_spec_augment
UpperCAmelCase = mask_time_prob
UpperCAmelCase = mask_time_length
UpperCAmelCase = mask_time_min_masks
UpperCAmelCase = mask_feature_prob
UpperCAmelCase = mask_feature_length
UpperCAmelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCAmelCase = num_codevectors_per_group
UpperCAmelCase = num_codevector_groups
UpperCAmelCase = contrastive_logits_temperature
UpperCAmelCase = feat_quantizer_dropout
UpperCAmelCase = num_negatives
UpperCAmelCase = codevector_dim
UpperCAmelCase = proj_codevector_dim
UpperCAmelCase = diversity_loss_weight
# ctc loss
UpperCAmelCase = ctc_loss_reduction
UpperCAmelCase = ctc_zero_infinity
# pretraining loss
UpperCAmelCase = replace_prob
@property
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
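# With the default conv_stride (5, 2, 2, 2, 2, 2, 2) the ratio is
# 5 * 2**6 = 320, i.e. one output frame per 320 raw audio samples.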
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
return (data["data"], data["target"])
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(lowerCAmelCase , lowerCAmelCase )
# Predict target for test data
UpperCAmelCase = xgb.predict(lowerCAmelCase )
UpperCAmelCase = predictions.reshape(len(lowerCAmelCase ) , 1 )
return predictions
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = fetch_california_housing()
UpperCAmelCase , UpperCAmelCase = data_handling(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = train_test_split(
lowerCAmelCase , lowerCAmelCase , test_size=0.25 , random_state=1 )
UpperCAmelCase = xgboost(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Error printing
print(F'''Mean Absolute Error : {mean_absolute_error(lowerCAmelCase , lowerCAmelCase )}''' )
print(F'''Mean Square Error : {mean_squared_error(lowerCAmelCase , lowerCAmelCase )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
def nor_gate(input_1: int, input_2: int) -> int:
    """NOR gate: the output is 1 only when both inputs are 0."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|    0    |    0    |   {nor_gate(0, 0)}    |")
    print(f"|    0    |    1    |   {nor_gate(0, 1)}    |")
    print(f"|    1    |    0    |   {nor_gate(1, 0)}    |")
    print(f"|    1    |    1    |   {nor_gate(1, 1)}    |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
def longest_distance(graph: dict) -> None:
    """Print the length of the longest path in a DAG using Kahn's topological sort."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))
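# Kahn's topological traversal touches every vertex and edge once, so the
# running time is O(V + E).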
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=7 ,UpperCAmelCase_=3 ,UpperCAmelCase_=18 ,UpperCAmelCase_=30 ,UpperCAmelCase_=4_00 ,UpperCAmelCase_=True ,UpperCAmelCase_=None ,UpperCAmelCase_=True ,UpperCAmelCase_=None ,UpperCAmelCase_=True ,UpperCAmelCase_=[0.5, 0.5, 0.5] ,UpperCAmelCase_=[0.5, 0.5, 0.5] ,):
_lowercase : Tuple = size if size is not None else {"""shortest_edge""": 18}
_lowercase : int = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
_lowercase : Optional[Any] = parent
_lowercase : Tuple = batch_size
_lowercase : Optional[int] = num_channels
_lowercase : Tuple = image_size
_lowercase : Dict = min_resolution
_lowercase : int = max_resolution
_lowercase : Optional[int] = do_resize
_lowercase : Any = size
_lowercase : str = do_center_crop
_lowercase : Optional[Any] = crop_size
_lowercase : Union[str, Any] = do_normalize
_lowercase : Union[str, Any] = image_mean
_lowercase : Tuple = image_std
def lowerCamelCase__ ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class UpperCamelCase ( snake_case , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = LevitImageProcessor if is_vision_available() else None
def lowerCamelCase__ ( self ):
_lowercase : Tuple = LevitImageProcessingTester(self )
@property
def lowerCamelCase__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase__ ( self ):
_lowercase : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ ,"""image_mean""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ ,"""image_std""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ ,"""do_normalize""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ ,"""do_resize""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ ,"""do_center_crop""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ ,"""size""" ) )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 18} )
self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} )
_lowercase : Any = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} )
def lowerCamelCase__ ( self ):
pass
def lowerCamelCase__ ( self ):
# Initialize image_processing
_lowercase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowercase : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ ,Image.Image )
# Test not batched input
_lowercase : str = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
_lowercase : Optional[int] = image_processing(UpperCAmelCase_ ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def lowerCamelCase__ ( self ):
# Initialize image_processing
_lowercase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowercase : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=UpperCAmelCase_ ,numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ ,np.ndarray )
# Test not batched input
_lowercase : List[Any] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
_lowercase : Optional[Any] = image_processing(UpperCAmelCase_ ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def lowerCamelCase__ ( self ):
# Initialize image_processing
_lowercase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowercase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=UpperCAmelCase_ ,torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ ,torch.Tensor )
# Test not batched input
_lowercase : str = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
_lowercase : int = image_processing(UpperCAmelCase_ ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
"""simple docstring"""
import pprint
import requests
UpperCAmelCase: Tuple = """https://zenquotes.io/api"""
def __SCREAMING_SNAKE_CASE ( ):
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def __SCREAMING_SNAKE_CASE ( ):
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
UpperCAmelCase: int = random_quotes()
pprint.pprint(response)
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
a_ = 0
if start < end:
a_ = randint(UpperCAmelCase , UpperCAmelCase )
a_ = a[end]
a_ = a[pivot]
a_ = temp
a_ , a_ = _in_place_partition(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
count += _in_place_quick_sort(UpperCAmelCase , UpperCAmelCase , p - 1 )
count += _in_place_quick_sort(UpperCAmelCase , p + 1 , UpperCAmelCase )
return count
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
a_ = 0
a_ = randint(UpperCAmelCase , UpperCAmelCase )
a_ = a[end]
a_ = a[pivot]
a_ = temp
a_ = start - 1
for index in range(UpperCAmelCase , UpperCAmelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
a_ = new_pivot_index + 1
a_ = a[new_pivot_index]
a_ = a[index]
a_ = temp
a_ = a[new_pivot_index + 1]
a_ = a[end]
a_ = temp
return new_pivot_index + 1, count
UpperCamelCase_ = TemporaryFile()
UpperCamelCase_ = 100 # 1000 elements are to be sorted
UpperCamelCase_ , UpperCamelCase_ = 0, 1 # mean and standard deviation
UpperCamelCase_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0) # using the same array
UpperCamelCase_ = np.load(outfile)
UpperCamelCase_ = len(M) - 1
UpperCamelCase_ = _in_place_quick_sort(M, 0, r)
print(
'No of Comparisons for 100 elements selected from a standard normal distribution'
'is :'
)
print(z)
"""simple docstring"""
import math
import os
import sys
def UpperCamelCase ( UpperCAmelCase ) ->str:
"""simple docstring"""
a_ = ""
try:
with open(UpperCAmelCase , "rb" ) as binary_file:
a_ = binary_file.read()
for dat in data:
a_ = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Grow the lexicon with the two one-bit extensions of the matched string."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]
def compress_data(data_bits: str) -> str:
    """Compress a bit string with a Lempel-Ziv style growing lexicon."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result
def add_file_length(source_path: str, compressed: str) -> str:
    """Prefix the compressed stream with the original file length."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str, to_write: str) -> None:
    """Pack the bit string into bytes (with a stop marker) and write it out."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []

    # stem
    # fmt: off
    rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight"))
    for param in ("weight", "bias", "running_mean", "running_var"):
        rename_keys.append(
            (f"backbone.0.body.bn1.{param}", f"backbone.conv_encoder.model.embedder.embedder.normalization.{param}")
        )
    # stages
    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            # shortcut
            if layer_idx == 0:
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
                    )
                )
                for param in ("weight", "bias", "running_mean", "running_var"):
                    rename_keys.append(
                        (
                            f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.{param}",
                            f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.{param}",
                        )
                    )
            # 3 convs
            for i in range(3):
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i + 1}.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
                    )
                )
                for param in ("weight", "bias", "running_mean", "running_var"):
                    rename_keys.append(
                        (
                            f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i + 1}.{param}",
                            f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.{param}",
                        )
                    )
    # fmt: on

    for i in range(config.encoder_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (
                f"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
                f"encoder.layers.{i}.self_attn.out_proj.weight",
            )
        )
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
        )
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
F"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
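# Each (old, new) pair produced above is applied to the checkpoint by rename_key
# below; e.g. "backbone.0.body.conv1.weight" ends up as
# "backbone.conv_encoder.model.embedder.embedder.convolution.weight".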
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
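# The fused in_proj tensors are split along dim 0: rows 0-255 form the query
# projection, 256-511 the key projection and the final 256 rows the value
# projection, matching DETR's hidden size of 256.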
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
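# COCO val2017 image 000000039769 (two cats on a couch) is the standard
# smoke-test picture used across the transformers conversion scripts.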
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
__UpperCAmelCase = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 228
|
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_ = None ) -> List[str]:
lowerCAmelCase__ = value
lowerCAmelCase__ = None # Added in order to delete a node easier
lowerCAmelCase__ = None
lowerCAmelCase__ = None
def __repr__( self ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F"""{self.value}""": (self.left, self.right)} , indent=1 )
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_ = None ) -> Union[str, Any]:
lowerCAmelCase__ = root
def __str__( self ) -> str:
return str(self.root )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> None:
if new_children is not None: # reset its kids
lowerCAmelCase__ = node.parent
if node.parent is not None: # reset its parent
if self.is_right(lowerCamelCase_ ): # If it is the right children
lowerCAmelCase__ = new_children
else:
lowerCAmelCase__ = new_children
else:
lowerCAmelCase__ = new_children
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> bool:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def __SCREAMING_SNAKE_CASE ( self ) -> bool:
return self.root is None
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> None:
lowerCAmelCase__ = Node(lowerCamelCase_ ) # create a new Node
if self.empty(): # if Tree is empty
lowerCAmelCase__ = new_node # set its root
else: # Tree is not empty
lowerCAmelCase__ = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
lowerCAmelCase__ = new_node # We insert the new node in a leaf
break
else:
lowerCAmelCase__ = parent_node.left
else:
if parent_node.right is None:
lowerCAmelCase__ = new_node
break
else:
lowerCAmelCase__ = parent_node.right
lowerCAmelCase__ = parent_node
def __SCREAMING_SNAKE_CASE ( self , *lowerCamelCase_ ) -> None:
for value in values:
self.__insert(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Node | None:
if self.empty():
raise IndexError('''Warning: Tree is empty! please use another.''' )
else:
lowerCAmelCase__ = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
lowerCAmelCase__ = node.left if value < node.value else node.right
return node
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ = None ) -> Node | None:
if node is None:
if self.root is None:
return None
lowerCAmelCase__ = self.root
if not self.empty():
while node.right is not None:
lowerCAmelCase__ = node.right
return node
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ = None ) -> Node | None:
if node is None:
lowerCAmelCase__ = self.root
if self.root is None:
return None
if not self.empty():
lowerCAmelCase__ = self.root
while node.left is not None:
lowerCAmelCase__ = node.left
return node
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> None:
lowerCAmelCase__ = self.search(lowerCamelCase_ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(lowerCamelCase_ , lowerCamelCase_ )
elif node.left is None: # Has only right children
self.__reassign_nodes(lowerCamelCase_ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(lowerCamelCase_ , node.left )
else:
lowerCAmelCase__ = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
lowerCAmelCase__ = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Iterable:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None ) -> Any:
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> None:
if node:
self.inorder(lowerCamelCase_ , node.left )
arr.append(node.value )
self.inorder(lowerCamelCase_ , node.right )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> int:
lowerCAmelCase__ = []
self.inorder(lowerCamelCase_ , lowerCamelCase_ ) # append all values to list using inorder traversal
return arr[k - 1]
def _snake_case ( A ) -> list[Node]:
lowerCAmelCase__ = []
if curr_node is not None:
lowerCAmelCase__ = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def _snake_case ( ) -> None:
lowerCAmelCase__ = (8, 3, 6, 1, 10, 14, 13, 4, 7)
lowerCAmelCase__ = BinarySearchTree()
for i in testlist:
t.insert(A )
# Prints all the elements of the list in order traversal
print(A )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''' , t.get_max().value ) # type: ignore
print('''Min Value: ''' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(A )
print(A )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 228
| 1
|
from __future__ import annotations
def print_distance(distance: list[float], src: int):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            # An edge can still be relaxed, so a negative-weight cycle exists.
            return True
    return False
def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    # Relax every edge vertex_count - 1 times.
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
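# A minimal sketch of the expected behaviour:
# >>> edges = [{"src": 0, "dst": 1, "weight": 2}, {"src": 1, "dst": 2, "weight": 3}]
# >>> bellman_ford(edges, 3, 2, 0)
# [0.0, 2.0, 5.0]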
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case = int(input("""Enter number of vertices: """).strip())
__snake_case = int(input("""Enter number of edges: """).strip())
__snake_case = [{} for _ in range(E)]
for i in range(E):
print("""Edge """, i + 1)
__snake_case , __snake_case , __snake_case = (
int(x)
for x in input("""Enter source, destination, weight: """).strip().split(""" """)
)
__snake_case = {"""src""": src, """dst""": dest, """weight""": weight}
__snake_case = int(input("""\nEnter shortest path source:""").strip())
__snake_case = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 259
|
def print_max_activities(start: list[int], finish: list[int]):
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case = [1, 3, 0, 5, 8, 5]
__snake_case = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
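# With the sample data above the greedy scan prints "0,1,3,4,": activity 0
# (finish 2) is always taken, then 1 (start 3 >= 2), then 3 (start 5 >= 4),
# then 4 (start 8 >= 7); activities 2 and 5 overlap earlier picks and are skipped.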
| 259
| 1
|
"""simple docstring"""
def kth_permutation(k, n):
    """
    Find the k'th lexicographic permutation (in increasing order) of
    0, 1, 2, ..., n-1 in O(n^2) time.
    """
    # Factorials from 1! to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation
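# Worked example: kth_permutation(10, 4) decomposes 10 in the factorial number
# system (10 = 1*3! + 2*2! + 0*1!) and returns [1, 3, 0, 2].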
if __name__ == "__main__":
import doctest
doctest.testmod()
| 371
|
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """
    Choose a random pivot for the list.
    """
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """
    Return the kth smallest number in lst (1-indexed).
    """
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
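# Example (duplicate-free input, since elements equal to the pivot are dropped
# by the partition): kth_number([2, 5, 1, 4, 3], 3) returns 3, the third
# smallest value, in expected linear time.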
if __name__ == "__main__":
import doctest
doctest.testmod()
| 128
| 0
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
a_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ : Optional[int] = None
if self.use_input_mask:
a_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
a_ : int = None
a_ : int = None
a_ : Union[str, Any] = None
if self.use_labels:
a_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a_ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
a_ : Optional[int] = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Optional[int]:
a_ : Dict = TFEsmModel(config=_lowercase )
a_ : int = {"""input_ids""": input_ids, """attention_mask""": input_mask}
a_ : List[str] = model(_lowercase )
a_ : int = [input_ids, input_mask]
a_ : Dict = model(_lowercase )
a_ : Union[str, Any] = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> List[str]:
a_ : int = True
a_ : List[Any] = TFEsmModel(config=_lowercase )
a_ : List[str] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""encoder_hidden_states""": encoder_hidden_states,
"""encoder_attention_mask""": encoder_attention_mask,
}
a_ : Any = model(_lowercase )
a_ : Dict = [input_ids, input_mask]
a_ : Tuple = model(_lowercase , encoder_hidden_states=_lowercase )
# Also check the case where encoder outputs are not passed
a_ : str = model(_lowercase , attention_mask=_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Dict:
a_ : Dict = TFEsmForMaskedLM(config=_lowercase )
a_ : str = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[str]:
a_ : Optional[Any] = self.num_labels
a_ : Any = TFEsmForTokenClassification(config=_lowercase )
a_ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask}
a_ : int = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
_A : Union[str, Any] = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
_A : Tuple = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
_A : List[str] = False
_A : Union[str, Any] = False
def UpperCamelCase__ ( self ) -> Tuple:
a_ : List[str] = TFEsmModelTester(self )
a_ : Union[str, Any] = ConfigTester(self , config_class=_lowercase , hidden_size=37 )
def UpperCamelCase__ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) -> str:
a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def UpperCamelCase__ ( self ) -> List[str]:
a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_lowercase )
def UpperCamelCase__ ( self ) -> str:
a_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowercase )
def UpperCamelCase__ ( self ) -> int:
a_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowercase )
@slow
def UpperCamelCase__ ( self ) -> Any:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : Dict = TFEsmModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
@unittest.skip("""Protein models do not support embedding resizing.""" )
def UpperCamelCase__ ( self ) -> Optional[int]:
pass
@unittest.skip("""Protein models do not support embedding resizing.""" )
def UpperCamelCase__ ( self ) -> Optional[int]:
pass
def UpperCamelCase__ ( self ) -> List[str]:
a_ , a_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : Optional[int] = model_class(_lowercase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
a_ : List[Any] = model.get_bias()
assert isinstance(_lowercase , _lowercase )
for k, v in name.items():
assert isinstance(_lowercase , tf.Variable )
else:
a_ : Any = model.get_output_embeddings()
assert x is None
a_ : List[str] = model.get_bias()
assert name is None
@require_tf
class A__(unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self ) -> int:
a_ : Tuple = TFEsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
a_ : Any = tf.constant([[0, 1, 2, 3, 4, 5]] )
a_ : List[str] = model(_lowercase )[0]
a_ : int = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , _lowercase )
# compare the actual values for a slice.
a_ : int = tf.constant(
[
[
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def UpperCamelCase__ ( self ) -> List[Any]:
a_ : int = TFEsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
a_ : int = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
a_ : int = model(_lowercase )[0]
# compare the actual values for a slice.
a_ : str = tf.constant(
[
[
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 248
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class A__(a_ ):
"""simple docstring"""
def __init__( self , *_lowercase , _lowercase=None , _lowercase=None , **_lowercase ) -> Optional[Any]:
super().__init__(*_lowercase , **_lowercase )
a_ : Optional[int] = eval_examples
a_ : Tuple = post_process_function
def UpperCamelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase = "eval" ) -> Union[str, Any]:
a_ : List[str] = self.eval_dataset if eval_dataset is None else eval_dataset
a_ : List[str] = self.get_eval_dataloader(_lowercase )
a_ : List[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
a_ : Optional[int] = self.compute_metrics
a_ : List[str] = None
a_ : Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
a_ : Any = time.time()
try:
a_ : Union[str, Any] = eval_loop(
_lowercase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowercase , metric_key_prefix=_lowercase , )
finally:
a_ : Dict = compute_metrics
a_ : Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_lowercase , _lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
a_ : List[Any] = self.post_process_function(_lowercase , _lowercase , output.predictions )
a_ : Optional[Any] = self.compute_metrics(_lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
a_ : List[str] = metrics.pop(_lowercase )
metrics.update(output.metrics )
else:
a_ : List[Any] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_lowercase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
a_ : List[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , _lowercase )
return metrics
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase=None , _lowercase = "test" ) -> str:
a_ : Tuple = self.get_test_dataloader(_lowercase )
# Temporarily disable metric computation, we will do it in the loop here.
a_ : List[Any] = self.compute_metrics
a_ : int = None
a_ : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
a_ : Union[str, Any] = time.time()
try:
a_ : List[str] = eval_loop(
_lowercase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowercase , metric_key_prefix=_lowercase , )
finally:
a_ : Optional[Any] = compute_metrics
a_ : Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_lowercase , _lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
a_ : Optional[int] = self.post_process_function(_lowercase , _lowercase , output.predictions , """predict""" )
a_ : List[Any] = self.compute_metrics(_lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
a_ : int = metrics.pop(_lowercase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_lowercase )
| 248
| 1
|
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
"linear": PIL.Image.Resampling.BILINEAR,
"bilinear": PIL.Image.Resampling.BILINEAR,
"bicubic": PIL.Image.Resampling.BICUBIC,
"lanczos": PIL.Image.Resampling.LANCZOS,
"nearest": PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
"linear": PIL.Image.LINEAR,
"bilinear": PIL.Image.BILINEAR,
"bicubic": PIL.Image.BICUBIC,
"lanczos": PIL.Image.LANCZOS,
"nearest": PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image to a PIL image."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image or a batch of images to a PIL image."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
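# A minimal usage sketch (assumes `sample` is a torch tensor of shape
# (batch, 3, H, W) with values in [-1, 1], e.g. a diffusion model output):
#   pil_images = pt_to_pil(sample)
#   pil_images[0].save("out.png")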
| 348
|
import sys
from collections import defaultdict
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = []
def lowercase_ ( self , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
return self.node_position[vertex]
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = pos
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__lowerCamelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__lowerCamelCase = 2 * start + 1
else:
__lowerCamelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
__lowerCamelCase , __lowerCamelCase = heap[smallest_child], positions[smallest_child]
__lowerCamelCase , __lowerCamelCase = (
heap[start],
positions[start],
)
__lowerCamelCase , __lowerCamelCase = temp, tempa
__lowerCamelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , lowerCamelCase__ )
self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = position[index]
while index != 0:
__lowerCamelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__lowerCamelCase = heap[parent]
__lowerCamelCase = position[parent]
self.set_position(position[parent] , lowerCamelCase__ )
else:
__lowerCamelCase = val
__lowerCamelCase = temp
self.set_position(lowerCamelCase__ , lowerCamelCase__ )
break
__lowerCamelCase = parent
else:
__lowerCamelCase = val
__lowerCamelCase = temp
self.set_position(lowerCamelCase__ , 0 )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase = len(lowerCamelCase__ ) // 2 - 1
for i in range(lowerCamelCase__ , -1 , -1 ):
self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , len(lowerCamelCase__ ) , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = positions[0]
__lowerCamelCase = sys.maxsize
self.top_to_bottom(lowerCamelCase__ , 0 , len(lowerCamelCase__ ) , lowerCamelCase__ )
return temp
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = Heap()
__lowerCamelCase = [0] * len(UpperCamelCase__ )
__lowerCamelCase = [-1] * len(UpperCamelCase__ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
__lowerCamelCase = [] # Heap of Distance of vertices from their neighboring vertex
__lowerCamelCase = []
for vertex in range(len(UpperCamelCase__ ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCamelCase__ )
heap.node_position.append(UpperCamelCase__ )
__lowerCamelCase = []
__lowerCamelCase = 1
__lowerCamelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__lowerCamelCase = 0
__lowerCamelCase = distance
heap.heapify(UpperCamelCase__ , UpperCamelCase__ )
for _ in range(1 , len(UpperCamelCase__ ) ):
__lowerCamelCase = heap.delete_minimum(UpperCamelCase__ , UpperCamelCase__ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__lowerCamelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCamelCase__ )]
):
__lowerCamelCase = distance
heap.bottom_to_top(
UpperCamelCase__ , heap.get_position(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__A = int(input("Enter number of edges: ").strip())
__A = defaultdict(list)
for _ in range(edges_number):
__A = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 348
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ : Tuple = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = ["DeiTFeatureExtractor"]
UpperCAmelCase_ : List[Any] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 200
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_lowerCamelCase : Union[str, Any] = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def a__ ( UpperCAmelCase : Dict=None ) -> Optional[int]:
if subparsers is not None:
UpperCAmelCase : Tuple = subparsers.add_parser('''tpu-config''' , description=_description )
else:
UpperCAmelCase : Dict = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
# Core arguments
UpperCAmelCase : Optional[int] = parser.add_argument_group(
'''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''' , type=UpperCAmelCase , default=UpperCAmelCase , help='''Path to the config file to use for accelerate.''' , )
config_args.add_argument(
'''--tpu_name''' , default=UpperCAmelCase , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
config_args.add_argument(
'''--tpu_zone''' , default=UpperCAmelCase , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
UpperCAmelCase : Union[str, Any] = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
pod_args.add_argument(
'''--command_file''' , default=UpperCAmelCase , help='''The path to the file containing the commands to run on the pod on startup.''' , )
pod_args.add_argument(
'''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
pod_args.add_argument(
'''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
pod_args.add_argument(
'''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
pod_args.add_argument(
'''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
parser.set_defaults(func=UpperCAmelCase )
return parser
def a__ ( UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(UpperCAmelCase ):
UpperCAmelCase : Union[str, Any] = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCAmelCase : List[Any] = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCAmelCase : List[str] = defaults.commands
if not args.tpu_name:
UpperCAmelCase : Tuple = defaults.tpu_name
if not args.tpu_zone:
UpperCAmelCase : int = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCAmelCase : Tuple = '''git+https://github.com/huggingface/accelerate.git'''
elif args.accelerate_version == "latest":
UpperCAmelCase : Dict = '''accelerate -U'''
elif isinstance(parse(args.accelerate_version ) , UpperCAmelCase ):
UpperCAmelCase : Optional[int] = f'''accelerate=={args.accelerate_version}'''
if not args.command_file and not args.command:
raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
if args.command_file:
with open(args.command_file , '''r''' ) as f:
UpperCAmelCase : int = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , UpperCAmelCase ):
UpperCAmelCase : int = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCAmelCase : Optional[int] = ['''cd /usr/share''']
if args.install_accelerate:
new_cmd += [f'''pip install {args.accelerate_version}''']
new_cmd += args.command
UpperCAmelCase : int = '''; '''.join(UpperCAmelCase )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCAmelCase : Any = ['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'''Running {" ".join(UpperCAmelCase )}''' )
return
subprocess.run(UpperCAmelCase )
print('''Successfully setup pod.''' )
def a__ ( ) -> Any:
UpperCAmelCase : Any = tpu_command_parser()
UpperCAmelCase : Tuple = parser.parse_args()
tpu_command_launcher(UpperCAmelCase )
| 336
| 0
|
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)
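# Nearest-neighbour resampling: destination pixel (j, i) simply copies the source
# pixel at (int(j * src_w / dst_w), int(i * src_h / dst_h)); there is no
# interpolation, which is fast but gives blocky results when upscaling.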
if __name__ == "__main__":
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = 800, 600
_UpperCAmelCase : List[Any] = imread("""image_data/lena.jpg""", 1)
_UpperCAmelCase : str = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
| 200
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : str = logging.get_logger(__name__)
_UpperCAmelCase : int = {
"""google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class lowercase ( lowercase_ ):
__SCREAMING_SNAKE_CASE : int = '''canine'''
def __init__( self , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=1_6384 , snake_case=16 , snake_case=0.02 , snake_case=1e-1_2 , snake_case=0 , snake_case=0xE000 , snake_case=0xE001 , snake_case=4 , snake_case=4 , snake_case=8 , snake_case=1_6384 , snake_case=128 , **snake_case , ):
super().__init__(pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , **snake_case )
snake_case_ = max_position_embeddings
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = type_vocab_size
snake_case_ = layer_norm_eps
# Character config:
snake_case_ = downsampling_rate
snake_case_ = upsampling_kernel_size
snake_case_ = num_hash_functions
snake_case_ = num_hash_buckets
snake_case_ = local_transformer_stride
| 200
| 1
|
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __magic_name__ ):
UpperCamelCase__ = ['''image_processor''', '''tokenizer''']
UpperCamelCase__ = '''FlavaImageProcessor'''
UpperCamelCase__ = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self :List[str] , __magic_name__ :Tuple=None , __magic_name__ :Dict=None , **__magic_name__ :List[Any] ):
'''simple docstring'''
a = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __magic_name__ , )
a = kwargs.pop("""feature_extractor""" )
a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__magic_name__ , __magic_name__ )
a = self.image_processor
def __call__( self :Union[str, Any] , __magic_name__ :Optional[ImageInput] = None , __magic_name__ :Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , __magic_name__ :bool = True , __magic_name__ :Union[bool, str, PaddingStrategy] = False , __magic_name__ :Union[bool, str, TruncationStrategy] = False , __magic_name__ :Optional[int] = None , __magic_name__ :int = 0 , __magic_name__ :Optional[int] = None , __magic_name__ :Optional[bool] = None , __magic_name__ :Optional[bool] = None , __magic_name__ :Optional[bool] = None , __magic_name__ :Optional[bool] = None , __magic_name__ :bool = False , __magic_name__ :bool = False , __magic_name__ :bool = False , __magic_name__ :bool = False , __magic_name__ :bool = True , __magic_name__ :Optional[Union[str, TensorType]] = None , **__magic_name__ :int , ):
'''simple docstring'''
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
a = self.tokenizer(
text=__magic_name__ , add_special_tokens=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , max_length=__magic_name__ , stride=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_token_type_ids=__magic_name__ , return_attention_mask=__magic_name__ , return_overflowing_tokens=__magic_name__ , return_special_tokens_mask=__magic_name__ , return_offsets_mapping=__magic_name__ , return_length=__magic_name__ , verbose=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
if images is not None:
a = self.image_processor(
__magic_name__ , return_image_mask=__magic_name__ , return_codebook_pixels=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
if text is not None and images is not None:
encoding.update(__magic_name__ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__magic_name__ ) , tensor_type=__magic_name__ )
def lowerCamelCase__ ( self :List[str] , *__magic_name__ :str , **__magic_name__ :str ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def lowerCamelCase__ ( self :Union[str, Any] , *__magic_name__ :Tuple , **__magic_name__ :Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = self.tokenizer.model_input_names
a = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __magic_name__ , )
return self.image_processor_class
@property
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __magic_name__ , )
return self.image_processor
| 228
|
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 228
| 1
|
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
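# Example invocation with hypothetical paths (added for illustration only):
# convert_tf_checkpoint_to_pytorch(
#     "/tmp/bert_model.ckpt", "/tmp/bert_config.json", "/tmp/pytorch_model.bin"
# )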
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 361
|
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[int]]):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        # A cell can be visited if it lies inside the grid, is land (1) and is unvisited.
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Depth-first search over the 8 neighbouring cells.
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make this cell visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
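# Added illustration (not in the original): two clusters of 1-cells under
# 8-directional connectivity form two islands.
assert Graph(3, 4, [[1, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]]).count_islands() == 2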
| 140
| 0
|
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Equation parameter: flattening of the ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
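# Illustrative call (added; commented out because of the package-relative import above):
# lamberts_ellipsoidal_distance(37.774856, -122.424227, 40.713019, -74.012647)
# San Francisco to New York should come out at roughly 4.1 million metres.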
if __name__ == "__main__":
import doctest
doctest.testmod()
| 41
|
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
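# Usage sketch (added; `ModelProto` is placed in this module's globals by the builder
# above, and "spiece.model" is an assumed local SentencePiece model file):
# m = ModelProto()
# m.ParseFromString(open("spiece.model", "rb").read())
# print(m.trainer_spec.model_type, len(m.pieces))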
| 128
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""moussaKam/mbarthez""": 1024,
"""moussaKam/barthez""": 1024,
"""moussaKam/barthez-orangesum-title""": 1024,
}
_UpperCamelCase = """▁"""
class lowerCamelCase__ ( snake_case ):
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = ['''input_ids''', '''attention_mask''']
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs=None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
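# Usage sketch (added; downloads the checkpoint referenced in the map above):
# tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
# tokenizer.tokenize("Bonjour le monde")  # SentencePiece pieces, word starts marked with "▁"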
| 350
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCamelCase = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
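# Added note (illustrative): with this pattern `import transformers.models.mbart` stays
# cheap; the heavy backends are only imported when an attribute such as
# `MBartForConditionalGeneration` is first accessed through the lazy module.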
| 234
| 0
|
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
__snake_case = {
'''linear''': PIL.Image.Resampling.BILINEAR,
'''bilinear''': PIL.Image.Resampling.BILINEAR,
'''bicubic''': PIL.Image.Resampling.BICUBIC,
'''lanczos''': PIL.Image.Resampling.LANCZOS,
'''nearest''': PIL.Image.Resampling.NEAREST,
}
else:
__snake_case = {
'''linear''': PIL.Image.LINEAR,
'''bilinear''': PIL.Image.BILINEAR,
'''bicubic''': PIL.Image.BICUBIC,
'''lanczos''': PIL.Image.LANCZOS,
'''nearest''': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image batch (NCHW, values in [-1, 1]) to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch (NHWC, values in [0, 1]) to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
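# Added illustration (commented out; numpy is not imported in this module):
# import numpy as np
# batch = np.random.rand(2, 64, 64, 3)  # float batch in [0, 1], NHWC
# numpy_to_pil(batch)  # -> list of two 64x64 RGB PIL images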
| 348
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 348
| 1
|
from math import factorial, radians
def maclaurin_sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin(angle_in_degrees) with a truncated Maclaurin series."""
    # Wrap the angle into [0, 360) so the series converges quickly
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
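# Added sanity check (not in the original): the series should reproduce sin(30°) = 0.5.
assert abs(maclaurin_sin(30) - 0.5) < 1e-9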
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 269
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    # `task` identifies the template; the schemas define the required input/label columns.
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
| 269
| 1
|
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    """Element-wise logistic function."""
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    """Numerically stable softmax over the last axis."""
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
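# Added note: subtracting the per-row max before exponentiating avoids overflow,
# e.g. softmax(np.array([[1000.0, 1001.0]])) stays finite where a naive np.exp would not.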
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r'\n        return_all_scores (`bool`, *optional*, defaults to `False`):\n            Whether to return all prediction scores or just the one of the predicted class.\n        function_to_apply (`str`, *optional*, defaults to `"default"`):\n            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n              has several labels, will apply the softmax function on the output.\n            - `"sigmoid"`: Applies the sigmoid function on the output.\n            - `"softmax"`: Applies the softmax function on the output.\n            - `"none"`: Does not apply any function on the output.\n    ',
)
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # "" is a sentinel default so that `top_k=None` can be passed explicitly by users.
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
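# Usage sketch (added; standard pipeline factory usage, labels depend on the model):
# from transformers import pipeline
# classifier = pipeline("text-classification")
# classifier("This is great!")  # -> [{"label": "...", "score": ...}]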
| 221
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            # The first resnet maps in_channels -> out_channels; the rest keep out_channels.
            in_channels = self.in_channels if i == 0 else self.out_channels
            resnets.append(FlaxResnetBlock2D(in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype))
            attentions.append(FlaxTransformer2DModel(in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype))
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            resnets.append(FlaxResnetBlock2D(in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype))
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            # Channels of the skip connection popped from the down path.
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            resnets.append(FlaxResnetBlock2D(in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype))
            attentions.append(FlaxTransformer2DModel(in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype))
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states


class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            resnets.append(FlaxResnetBlock2D(in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype))
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # There is always at least one resnet; attention and resnet blocks then alternate.
        resnets = [FlaxResnetBlock2D(in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype)]
        attentions = []
        for _ in range(self.num_layers):
            attentions.append(FlaxTransformer2DModel(in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype))
            resnets.append(FlaxResnetBlock2D(in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype))
        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
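# Shape sketch (added; all concrete values are assumptions for illustration):
# block = FlaxCrossAttnDownBlock2D(in_channels=320, out_channels=320, num_attention_heads=8)
# params = block.init(jax.random.PRNGKey(0), hidden_states, temb, encoder_hidden_states)
# where hidden_states is NHWC, temb is (batch, time_embed_dim), and
# encoder_hidden_states is (batch, seq_len, cross_attention_dim); requires `import jax`.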
| 199
| 0
|
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
_snake_case = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
_snake_case = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(bert_test)
        blip_test_tester_mapping = get_test_to_tester_mapping(blip_test)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
"BlipModelTest": "BlipModelTester",
"BlipTextImageModelTest": "BlipTextImageModelsModelTester",
"BlipTextModelTest": "BlipTextModelTester",
"BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
"BlipVQAModelTest": "BlipVQAModelTester",
"BlipVisionModelTest": "BlipVisionModelTester",
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(bert_test)
        blip_model_test_mapping = get_model_to_test_mapping(blip_test)

        EXPECTED_BERT_MAPPING = {
"BertForMaskedLM": ["BertModelTest"],
"BertForMultipleChoice": ["BertModelTest"],
"BertForNextSentencePrediction": ["BertModelTest"],
"BertForPreTraining": ["BertModelTest"],
"BertForQuestionAnswering": ["BertModelTest"],
"BertForSequenceClassification": ["BertModelTest"],
"BertForTokenClassification": ["BertModelTest"],
"BertLMHeadModel": ["BertModelTest"],
"BertModel": ["BertModelTest"],
}
        EXPECTED_BLIP_MAPPING = {
"BlipForConditionalGeneration": ["BlipTextImageModelTest"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
"BlipForQuestionAnswering": ["BlipVQAModelTest"],
"BlipModel": ["BlipModelTest"],
"BlipTextModel": ["BlipTextModelTest"],
"BlipVisionModel": ["BlipVisionModelTest"],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(bert_test)
        blip_model_tester_mapping = get_model_to_tester_mapping(blip_test)

        EXPECTED_BERT_MAPPING = {
"BertForMaskedLM": ["BertModelTester"],
"BertForMultipleChoice": ["BertModelTester"],
"BertForNextSentencePrediction": ["BertModelTester"],
"BertForPreTraining": ["BertModelTester"],
"BertForQuestionAnswering": ["BertModelTester"],
"BertForSequenceClassification": ["BertModelTester"],
"BertForTokenClassification": ["BertModelTester"],
"BertLMHeadModel": ["BertModelTester"],
"BertModel": ["BertModelTester"],
}
        EXPECTED_BLIP_MAPPING = {
"BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
"BlipForQuestionAnswering": ["BlipVQAModelTester"],
"BlipModel": ["BlipModelTester"],
"BlipTextModel": ["BlipTextModelTester"],
"BlipVisionModel": ["BlipVisionModelTester"],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
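# Added note: these tests are run from the repository root, e.g. with
# `python -m pytest tests/repo_utils/test_get_test_info.py` (path is an assumption).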
| 300
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 2000, snr: float = 0.15, sigma_min: float = 0.01, sigma_max: float = 1348.0, sampling_eps: float = 1e-5, correct_steps: int = 1):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device)
        )

    def step_pred(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True) -> Union[SdeVeOutput, Tuple]:
        """Predictor step: propagate the sample with the SDE drift and diffusion."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device)  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype)
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(self, model_output: torch.FloatTensor, sample: torch.FloatTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True) -> Union[SchedulerOutput, Tuple]:
        """Corrector step: nudge the sample along the score direction (Langevin MCMC)."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
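# Predictor-corrector sampling sketch (added; `unet` and `sample` are placeholders):
# scheduler = ScoreSdeVeScheduler()
# scheduler.set_timesteps(num_inference_steps=1000)
# scheduler.set_sigmas(num_inference_steps=1000)
# for t in scheduler.timesteps:
#     for _ in range(scheduler.config.correct_steps):
#         score = unet(sample, t).sample
#         sample = scheduler.step_correct(score, sample).prev_sample
#     score = unet(sample, t).sample
#     sample = scheduler.step_pred(score, t, sample).prev_sample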
| 300
| 1
|
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
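# Added sanity check (not in the original): clips longer than `max_length` seconds
# are cut to exactly sample_rate * max_length samples.
assert len(random_subsample(np.zeros(32000), max_length=1.0)) == 16000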
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/feature extractor we are going to fine-tune from."""

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`. "
                "Only make use of `--freeze_feature_encoder`."
            )
def __UpperCamelCase ( ):
lowercase__ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ : Optional[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_audio_classification''' , __lowercase , __lowercase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase__ : Tuple = training_args.get_process_log_level()
logger.setLevel(__lowercase )
transformers.utils.logging.set_verbosity(__lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
lowercase__ : Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__ : List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to train from scratch.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset and prepare it for the audio classification task.
lowercase__ : Optional[Any] = DatasetDict()
lowercase__ : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
lowercase__ : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. """
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. """
'''Make sure to set `--label_column_name` to the correct text column - one of '''
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
lowercase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )
    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Computes accuracy on a batch of predictions."""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id,
        id2label=id2label, finetuning_task="audio-classification", cache_dir=model_args.cache_dir,
        revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config,
        cache_dir=model_args.cache_dir, revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)
if training_args.do_eval:
if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)
# Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=feature_extractor,
    )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'audio-classification',
'dataset': data_args.dataset_name,
'tags': ['audio-classification'],
}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
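# A minimal, illustrative invocation of the fine-tuning script above (model and
# dataset ids are assumptions chosen for the example, not prescribed by the script):
#
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir ./wav2vec2-base-ks --do_train --do_eval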
| 198
|
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
_UpperCAmelCase = """scheduler_config.json"""
class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    HeunDiscreteScheduler = 6
    EulerAncestralDiscreteScheduler = 7
    DPMSolverMultistepScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14
@dataclass
class SchedulerOutput(BaseOutput):
    """Base class for the output of a scheduler's `step` function."""

    prev_sample: torch.FloatTensor
class SchedulerMixin:
    """Base class for all schedulers; provides config loading/saving and compatibility lookup."""

    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder,
            return_unused_kwargs=True, return_commit_hash=True, **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
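# Illustrative use of the mixin above (a sketch, not part of this file; the model
# id is only an example, any repo with a scheduler_config.json works):
#
#   from diffusers import DDPMScheduler
#   scheduler = DDPMScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler")
#   print(scheduler.compatibles)  # scheduler classes that can be swapped in with the same config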
| 140
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs),
    )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=1,
        help="The number of minibatches to be run before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
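# Illustrative launch command for the script above (4 accumulation steps is just
# an example value; the flags are the ones defined in main()):
#
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 4 --mixed_precision fp16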
| 102
|
def merge_sort(collection: list):
    """Sort by repeatedly pulling the minimum to the front and the maximum to the back."""
    start, end = [], []
    while len(collection) > 1:
        min_value, max_value = min(collection), max(collection)
        start.append(min_value)
        end.append(max_value)
        collection.remove(min_value)
        collection.remove(max_value)
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
UpperCamelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
UpperCamelCase__ = [int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
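# Worked example of the min/max passes above:
#   merge_sort([5, 1, 4, 2, 3])
#   pass 1: start=[1], end=[5], remaining=[4, 2, 3]
#   pass 2: start=[1, 2], end=[5, 4], remaining=[3]
#   end reversed -> [4, 5]; result: [1, 2] + [3] + [4, 5] == [1, 2, 3, 4, 5]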
| 102
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
    def test_is_control(self):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence, return_attention_mask=False, return_token_type_ids=False,
                    return_offsets_mapping=True, add_special_tokens=True,
                )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]

    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )
                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)
                self.assertEqual(input_dict, prepared_input_dict)
| 254
|
'''simple docstring'''
def solution(length: int = 50) -> int:
    """Project Euler 116: count the ways to replace black tiles in a row of the given
    length with red (length 2), green (length 3) or blue (length 4) tiles."""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])
if __name__ == "__main__":
print(F'''{solution() = }''')
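# Sanity check (hand-computed from the recurrence above): for a row of length 5
# there are 7 red (length-2), 3 green (length-3) and 2 blue (length-4)
# arrangements, so solution(5) == 12.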
| 234
| 0
|
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
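# Note: 0.85 is the Jaccard similarity threshold handed to make_duplicate_clusters;
# the two "a "-repeated documents are near-duplicates well above that threshold,
# which is why the first cluster has exactly two members.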
| 129
|
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 151,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")


def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint


def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)
    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]
    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")

    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
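# Illustrative invocation (the checkpoint filename is an example; the script keys
# off substrings such as "imagenet64", "256", "cd", "ct" and "test" in the basename):
#
#   python convert_consistency_to_diffusers.py --unet_path cd_imagenet64_l2.pt --dump_path ./cd_imagenet64_l2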
| 129
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Incremental Eratosthenes sieve: lazily yields the primes in order."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite; slide its recorded factor to the next free multiple.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # No recorded factor, so `prime` really is prime; mark its square.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
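# Quick check of the incremental sieve above:
#   import itertools
#   list(itertools.islice(sieve(), 8))  # -> [2, 3, 5, 7, 11, 13, 17, 19]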
| 269
|
"""simple docstring"""
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Project Euler 99: return the 1-indexed line whose base**exponent is largest,
    compared via exponent * log10(base)."""
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
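# Why comparing x * log10(a) works (worked example): for 2**11 vs 3**7,
# 11 * log10(2) ≈ 3.311 and 7 * log10(3) ≈ 3.340, so 3**7 (= 2187) beats 2**11 (= 2048).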
| 269
| 1
|
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=30522, type=int)
    args = parser.parse_args()
logger.info(F"Loading data from {args.data_file}")
with open(args.data_file, '''rb''') as fp:
        data = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
    counter = Counter()
for tk_ids in data:
counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
logger.info(F"Dump to {args.token_counts_dump}")
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
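# Illustrative run (the paths shown are simply the script's defaults):
#
#   python token_counts.py --data_file data/dump.bert-base-uncased.pickle \
#       --token_counts_dump data/token_counts.bert-base-uncased.pickle --vocab_size 30522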
| 359
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
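# Minimal sketch of how Tracker is used (toy module for illustration only):
#
#   traced = Tracker(nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU()))(torch.randn(1, 3, 16, 16))
#   traced.parametrized  # -> [Conv2d(3, 8, ...)]: only layers holding learnable params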
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """Transfer the weights of `self.src` to `self.dest` via a forward pass with `x` as input."""
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    """Fake wrapper for a vissl RegNet trunk that exposes the feature blocks the way vissl does."""

    def __init__(self, model: nn.Module):
        super().__init__()
        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))
        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x, out_feat_keys=None, feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """Maps a model name to a function returning (loaded source model, optional head state dict);
    names not registered explicitly fall back to timm."""

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val
class NameToOurModelFuncMap(dict):
    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str, from_model_func: Callable[[], nn.Module], our_model_func: Callable[[], nn.Module],
    config: RegNetConfig, save_directory: Path, push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )
    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output
    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add model", use_temp_dir=True,
        )
        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add image processor", use_temp_dir=True,
        )
        print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
"""regnet-x-002""": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="""x""" ),
"""regnet-x-004""": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="""x""" ),
"""regnet-x-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="""x""" ),
"""regnet-x-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="""x""" ),
"""regnet-x-016""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="""x""" ),
"""regnet-x-032""": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="""x""" ),
"""regnet-x-040""": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="""x""" ),
"""regnet-x-064""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="""x""" ),
"""regnet-x-080""": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="""x""" ),
"""regnet-x-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="""x""" ),
"""regnet-x-160""": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="""x""" ),
"""regnet-x-320""": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="""x""" ),
# y variant
"""regnet-y-002""": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"""regnet-y-004""": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"""regnet-y-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"""regnet-y-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"""regnet-y-016""": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"""regnet-y-032""": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
"""regnet-y-040""": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
"""regnet-y-064""": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
"""regnet-y-080""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
"""regnet-y-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
"""regnet-y-160""": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
"""regnet-y-320""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"""regnet-y-320-seer""": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"""regnet-y-640-seer""": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"""regnet-y-1280-seer""": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"""regnet-y-2560-seer""": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"""regnet-y-10b-seer""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
# finetuned on imagenet
"""regnet-y-320-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"""regnet-y-640-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"""regnet-y-1280-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"""regnet-y-2560-seer-in1k""": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"""regnet-y-10b-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
}
UpperCAmelCase_ : List[Any] = NameToOurModelFuncMap()
UpperCAmelCase_ : Union[str, Any] = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
    # `save_directory` is the output folder of the enclosing conversion function
    files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
    model = model_func()
    # the checkpoint stores the trunk and the (optional) classification heads separately
    model_state_dict = files["classy_state_dict"]["base_model"]["model"]
    state_dict = model_state_dict["trunk"]
    model.load_state_dict(state_dict)
    return model.eval(), model_state_dict["heads"]
# pretrained
names_to_from_model_map["regnet-y-320-seer"] = partial(
    load_using_classy_vision,
    "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
    lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
)
names_to_from_model_map["regnet-y-640-seer"] = partial(
    load_using_classy_vision,
    "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
    lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
)
names_to_from_model_map["regnet-y-1280-seer"] = partial(
    load_using_classy_vision,
    "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
    lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
)
names_to_from_model_map["regnet-y-10b-seer"] = partial(
    load_using_classy_vision,
    "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
    lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
    ),
)
# IN1K finetuned
names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
    load_using_classy_vision,
    "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
    lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
)
names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
    load_using_classy_vision,
    "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
    lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
)
names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
    load_using_classy_vision,
    "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
    lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
)
names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
    load_using_classy_vision,
    "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
    lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
    ),
)
# `save_directory` and `push_to_hub` come from the enclosing function's arguments
if model_name:
    convert_weight_and_push(
        model_name,
        names_to_from_model_map[model_name],
        names_to_ours_model_map[model_name],
        names_to_config[model_name],
        save_directory,
        push_to_hub,
    )
else:
    for model_name, config in names_to_config.items():
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            config,
            save_directory,
            push_to_hub,
        )
return config, expected_shape
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported regnet* architecture,'''
''' currently: regnetx-*, regnety-*. If `None`, all of them will be converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
UpperCamelCase_ = parser.parse_args()
UpperCamelCase_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
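# Illustrative invocation (the script and folder names are placeholders, not part of the source):
#
#   python convert_regnet_to_pytorch.py --model_name regnet-y-040 --pytorch_dump_folder_path ./converted
#
# Caveat on the --push_to_hub flag defined above: argparse's `type=bool` converts any
# non-empty string to True, so pass an empty string (--push_to_hub "") to actually disable it.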
| 59
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
"in_channels": 8,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
    def test_kandinsky_controlnet(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy")
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png")
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
A_ : int = "A robot, 4k photo"
A_ : Tuple = torch.Generator(device="cuda" ).manual_seed(0 )
A_ , A_ : List[str] = pipe_prior(
snake_case , generator=snake_case , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
A_ : List[Any] = torch.Generator(device="cuda" ).manual_seed(0 )
A_ : Tuple = pipeline(
image_embeds=snake_case , negative_image_embeds=snake_case , hint=snake_case , generator=snake_case , num_inference_steps=100 , output_type="np" , )
A_ : List[str] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(snake_case , snake_case )
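# For reference, the same two-stage flow outside the test harness is roughly (a sketch with
# the model ids used above; fp16 weights, a CUDA device, and a prepared `hint` tensor assumed):
#
#   prior = KandinskyV22PriorPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16).to("cuda")
#   decoder = KandinskyV22ControlnetPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16).to("cuda")
#   image_emb, negative_emb = prior("A robot, 4k photo").to_tuple()
#   image = decoder(image_embeds=image_emb, negative_image_embeds=negative_emb,
#                   hint=hint, height=512, width=512).images[0]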
| 300
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
A_ : int = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
self.enable_attention_slicing(snake_case )
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(snake_case , snake_case ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(snake_case )}." )
# get prompt text embeddings
A_ : int = self.tokenizer(
snake_case , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
A_ : Dict = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
A_ : Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
A_ : Tuple = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
A_ : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
A_ , A_ , A_ : int = text_embeddings.shape
A_ : List[str] = text_embeddings.repeat(1 , snake_case , 1 )
A_ : List[str] = text_embeddings.view(bs_embed * num_images_per_prompt , snake_case , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
A_ : Dict = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
A_ : List[str]
if negative_prompt is None:
A_ : List[str] = [""]
elif type(snake_case ) is not type(snake_case ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(snake_case )} !="
f" {type(snake_case )}." )
elif isinstance(snake_case , snake_case ):
A_ : Optional[Any] = [negative_prompt]
elif batch_size != len(snake_case ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(snake_case )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`." )
else:
A_ : Any = negative_prompt
A_ : Optional[int] = text_input_ids.shape[-1]
A_ : Dict = self.tokenizer(
snake_case , padding="max_length" , max_length=snake_case , truncation=snake_case , return_tensors="pt" , )
A_ : Any = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
A_ : Tuple = uncond_embeddings.shape[1]
A_ : Dict = uncond_embeddings.repeat(snake_case , snake_case , 1 )
A_ : Dict = uncond_embeddings.view(batch_size * num_images_per_prompt , snake_case , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A_ : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
A_ : Tuple = torch.randn(
snake_case , generator=snake_case , device="cpu" , dtype=snake_case ).to(self.device )
A_ : Optional[Any] = torch.randn(snake_case , generator=snake_case , device="cpu" , dtype=snake_case ).to(
self.device )
else:
A_ : int = torch.randn(
snake_case , generator=snake_case , device=self.device , dtype=snake_case )
A_ : Optional[int] = torch.randn(snake_case , generator=snake_case , device=self.device , dtype=snake_case )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
A_ : Tuple = latents_reference.to(self.device )
A_ : Any = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
A_ : List[Any] = (latents_shape[3] - latents_shape_reference[3]) // 2
A_ : Optional[int] = (latents_shape[2] - latents_shape_reference[2]) // 2
A_ : Optional[int] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
A_ : Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
A_ : Optional[Any] = 0 if dx < 0 else dx
A_ : Optional[Any] = 0 if dy < 0 else dy
A_ : List[str] = max(-dx , 0 )
A_ : List[Any] = max(-dy , 0 )
# import pdb
# pdb.set_trace()
A_ : Any = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(snake_case )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
A_ : str = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
A_ : Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A_ : Optional[int] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A_ : List[str] = {}
if accepts_eta:
A_ : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(snake_case ) ):
# expand the latents if we are doing classifier free guidance
A_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A_ : Any = self.scheduler.scale_model_input(snake_case , snake_case )
# predict the noise residual
A_ : List[str] = self.unet(snake_case , snake_case , encoder_hidden_states=snake_case ).sample
# perform guidance
if do_classifier_free_guidance:
A_ , A_ : Dict = noise_pred.chunk(2 )
A_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
A_ : Tuple = self.scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(snake_case , snake_case , snake_case )
A_ : List[str] = 1 / 0.18215 * latents
A_ : Tuple = self.vae.decode(snake_case ).sample
A_ : Dict = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
A_ : int = self.feature_extractor(self.numpy_to_pil(snake_case ) , return_tensors="pt" ).to(
self.device )
A_ , A_ : List[str] = self.safety_checker(
images=snake_case , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
A_ : List[str] = None
if output_type == "pil":
A_ : Optional[int] = self.numpy_to_pil(snake_case )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=snake_case , nsfw_content_detected=snake_case )
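# Usage sketch for the seed-resize trick implemented above (hypothetical loading path; in
# diffusers this class ships as the "seed_resize_stable_diffusion" community pipeline):
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="seed_resize_stable_diffusion").to("cuda")
#   generator = torch.Generator("cuda").manual_seed(0)
#   small = pipe(prompt, height=512, width=512, generator=generator).images[0]
#   generator = torch.Generator("cuda").manual_seed(0)
#   large = pipe(prompt, height=512, width=768, generator=generator).images[0]
#
# Because the reference latents are always drawn on a fixed 64x64 grid and re-centered into
# the target latent grid, both images share their large-scale composition.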
| 300
| 1
|
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
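# Minimal usage sketch for the classes above (standard transformers API; `image` is any PIL image):
#
#   from transformers import AutoImageProcessor, ResNetForImageClassification
#   import torch
#
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#   inputs = processor(images=image, return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])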
| 303
|
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=50 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=None , ) ->Dict:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = initializer_range
a_ = use_labels
a_ = scope
def UpperCAmelCase__ ( self) ->Any:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = None
if self.use_input_mask:
a_ = random_attention_mask([self.batch_size, self.seq_length])
if self.use_labels:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCAmelCase__ ( self) ->Optional[Any]:
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self) ->List[str]:
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
a_ = True
a_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->str:
a_ = BertGenerationEncoder(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase)
a_ = model(__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->Union[str, Any]:
a_ = True
a_ = BertGenerationEncoder(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->List[str]:
a_ = True
a_ = True
a_ = BertGenerationDecoder(config=__UpperCAmelCase).to(__UpperCAmelCase).eval()
# first forward pass
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , )
a_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
a_ = ids_tensor((self.batch_size, 3) , config.vocab_size)
a_ = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
a_ = torch.cat([input_ids, next_tokens] , dim=-1)
a_ = torch.cat([input_mask, next_mask] , dim=-1)
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0]
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0]
# select random slice
a_ = ids_tensor((1,) , output_from_past.shape[-1]).item()
a_ = output_from_no_past[:, -3:, random_slice_idx].detach()
a_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , ) ->Tuple:
a_ = BertGenerationDecoder(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase__ ( self) ->str:
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = BertGenerationEncoderTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->Optional[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Tuple:
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->int:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->str:
a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
self.assertIsNotNone(__UpperCAmelCase)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
@slow
def UpperCAmelCase__ ( self) ->int:
a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        a_ = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
with torch.no_grad():
a_ = model(__UpperCAmelCase)[0]
        a_ = torch.Size([1, 8, 1024])
self.assertEqual(output.shape , __UpperCAmelCase)
a_ = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
@slow
def UpperCAmelCase__ ( self) ->List[str]:
a_ = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        a_ = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
with torch.no_grad():
a_ = model(__UpperCAmelCase)[0]
        a_ = torch.Size([1, 8, 50358])
self.assertEqual(output.shape , __UpperCAmelCase)
a_ = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4))
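# For context: these checkpoints are meant to be paired inside an EncoderDecoderModel for
# seq2seq fine-tuning, along the lines of the official example (sketch):
#
#   from transformers import BertGenerationDecoder, BertGenerationEncoder, EncoderDecoderModel
#
#   encoder = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
#   decoder = BertGenerationDecoder.from_pretrained(
#       "google/bert_for_seq_generation_L-24_bbc_encoder", add_cross_attention=True, is_decoder=True)
#   model = EncoderDecoderModel(encoder=encoder, decoder=decoder)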
| 303
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
"""openai/imagegpt-small""": """""",
"""openai/imagegpt-medium""": """""",
"""openai/imagegpt-large""": """""",
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=512 + 1,  # add one for start of sentence (sos) token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
return inputs
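# Construction sketch: the defaults reproduce ImageGPT-small on 32x32 inputs, where every
# pixel is a token from a 512-colour palette plus one start-of-sequence token:
#
#   from transformers import ImageGPTConfig
#
#   config = ImageGPTConfig()
#   assert config.vocab_size == 513 and config.n_positions == 32 * 32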
| 102
|
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
def SCREAMING_SNAKE_CASE (self , a_ = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__snake_case : Optional[int] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.enable_attention_slicing(a_ )
@torch.no_grad()
    def __call__(
        self,
        prompt,
        height=512,
        width=512,
        num_inference_steps=50,
        guidance_scale=7.5,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
        callback=None,
        callback_steps=1,
        text_embeddings=None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(prompt)}""")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(a_ , a_ ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(a_ )}.""" )
# get prompt text embeddings
__snake_case : int = self.tokenizer(
a_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
__snake_case : int = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case : List[Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__snake_case : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__snake_case : Optional[int] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case : Union[str, Any] = text_embeddings.shape
__snake_case : Optional[int] = text_embeddings.repeat(1 , a_ , 1 )
__snake_case : Dict = text_embeddings.view(bs_embed * num_images_per_prompt , a_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case : Dict = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case : List[str]
if negative_prompt is None:
__snake_case : List[Any] = ['''''']
elif type(a_ ) is not type(a_ ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(a_ )} !="""
f""" {type(a_ )}.""" )
elif isinstance(a_ , a_ ):
__snake_case : List[str] = [negative_prompt]
elif batch_size != len(a_ ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(a_ )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
__snake_case : Optional[int] = negative_prompt
__snake_case : Optional[int] = text_input_ids.shape[-1]
__snake_case : List[Any] = self.tokenizer(
a_ , padding='''max_length''' , max_length=a_ , truncation=a_ , return_tensors='''pt''' , )
__snake_case : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case : str = uncond_embeddings.shape[1]
__snake_case : int = uncond_embeddings.repeat(a_ , a_ , 1 )
__snake_case : int = uncond_embeddings.view(batch_size * num_images_per_prompt , a_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case : Any = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case : Union[str, Any] = torch.randn(
a_ , generator=a_ , device='''cpu''' , dtype=a_ ).to(self.device )
__snake_case : Tuple = torch.randn(a_ , generator=a_ , device='''cpu''' , dtype=a_ ).to(
self.device )
else:
__snake_case : Dict = torch.randn(
a_ , generator=a_ , device=self.device , dtype=a_ )
__snake_case : Dict = torch.randn(a_ , generator=a_ , device=self.device , dtype=a_ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__snake_case : Union[str, Any] = latents_reference.to(self.device )
__snake_case : Dict = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__snake_case : int = (latents_shape[3] - latents_shape_reference[3]) // 2
__snake_case : Tuple = (latents_shape[2] - latents_shape_reference[2]) // 2
__snake_case : Optional[int] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__snake_case : Union[str, Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__snake_case : int = 0 if dx < 0 else dx
__snake_case : Union[str, Any] = 0 if dy < 0 else dy
__snake_case : str = max(-dx , 0 )
__snake_case : Tuple = max(-dy , 0 )
# import pdb
# pdb.set_trace()
__snake_case : Any = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(a_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case : Optional[Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case : List[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case : Any = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case : Tuple = {}
if accepts_eta:
__snake_case : List[str] = eta
for i, t in enumerate(self.progress_bar(a_ ) ):
# expand the latents if we are doing classifier free guidance
__snake_case : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case : Tuple = self.scheduler.scale_model_input(a_ , a_ )
# predict the noise residual
__snake_case : int = self.unet(a_ , a_ , encoder_hidden_states=a_ ).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case : Tuple = noise_pred.chunk(2 )
__snake_case : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case : Optional[Any] = self.scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(a_ , a_ , a_ )
        __snake_case : Union[str, Any] = 1 / 0.18215 * latents
__snake_case : Optional[Any] = self.vae.decode(a_ ).sample
__snake_case : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__snake_case : Optional[int] = self.feature_extractor(self.numpy_to_pil(a_ ) , return_tensors='''pt''' ).to(
self.device )
__snake_case , __snake_case : List[Any] = self.safety_checker(
images=a_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__snake_case : Union[str, Any] = None
if output_type == "pil":
__snake_case : Union[str, Any] = self.numpy_to_pil(a_ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=a_ , nsfw_content_detected=a_ )
| 102
| 1
|
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
            return math.exp(t * -12.0)
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class __lowerCAmelCase ( __magic_name__ , __magic_name__ ):
"""simple docstring"""
@register_to_config
def __init__( self , lowerCamelCase__ = 1_000 , lowerCamelCase__ = "fixed_small_log" , lowerCamelCase__ = True , lowerCamelCase__ = 1.0 , lowerCamelCase__ = "epsilon" , lowerCamelCase__ = "squaredcos_cap_v2" , ) -> Union[str, Any]:
'''simple docstring'''
if beta_schedule != "squaredcos_cap_v2":
raise ValueError('UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'' )
__lowerCamelCase = betas_for_alpha_bar(lowerCamelCase__ )
__lowerCamelCase = 1.0 - self.betas
__lowerCamelCase = torch.cumprod(self.alphas , dim=0 )
__lowerCamelCase = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
__lowerCamelCase = 1.0
# setable values
__lowerCamelCase = None
__lowerCamelCase = torch.from_numpy(np.arange(0 , lowerCamelCase__ )[::-1].copy() )
__lowerCamelCase = variance_type
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = num_inference_steps
__lowerCamelCase = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
__lowerCamelCase = (np.arange(0 , lowerCamelCase__ ) * step_ratio).round()[::-1].copy().astype(np.intaa )
__lowerCamelCase = torch.from_numpy(lowerCamelCase__ ).to(lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None ) -> Union[str, Any]:
'''simple docstring'''
if prev_timestep is None:
__lowerCamelCase = t - 1
__lowerCamelCase = self.alphas_cumprod[t]
__lowerCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
__lowerCamelCase = 1 - alpha_prod_t
__lowerCamelCase = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
__lowerCamelCase = self.betas[t]
else:
__lowerCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
__lowerCamelCase = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
__lowerCamelCase = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
__lowerCamelCase = torch.log(torch.clamp(lowerCamelCase__ , min=1e-20 ) )
__lowerCamelCase = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
__lowerCamelCase = variance.log()
__lowerCamelCase = beta.log()
__lowerCamelCase = (predicted_variance + 1) / 2
__lowerCamelCase = frac * max_log + (1 - frac) * min_log
return variance
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__=None , lowerCamelCase__ = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]:
'''simple docstring'''
__lowerCamelCase = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
__lowerCamelCase , __lowerCamelCase = torch.split(lowerCamelCase__ , sample.shape[1] , dim=1 )
else:
__lowerCamelCase = None
# 1. compute alphas, betas
if prev_timestep is None:
__lowerCamelCase = t - 1
__lowerCamelCase = self.alphas_cumprod[t]
__lowerCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
__lowerCamelCase = 1 - alpha_prod_t
__lowerCamelCase = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
__lowerCamelCase = self.betas[t]
__lowerCamelCase = self.alphas[t]
else:
__lowerCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev
__lowerCamelCase = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__lowerCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__lowerCamelCase = model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
' for the UnCLIPScheduler.' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__lowerCamelCase = torch.clamp(
lowerCamelCase__ , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowerCamelCase = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
__lowerCamelCase = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowerCamelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
__lowerCamelCase = 0
if t > 0:
__lowerCamelCase = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=lowerCamelCase__ , device=model_output.device )
__lowerCamelCase = self._get_variance(
lowerCamelCase__ , predicted_variance=lowerCamelCase__ , prev_timestep=lowerCamelCase__ , )
if self.variance_type == "fixed_small_log":
__lowerCamelCase = variance
elif self.variance_type == "learned_range":
__lowerCamelCase = (0.5 * variance).exp()
else:
raise ValueError(
f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
' for the UnCLIPScheduler.' )
__lowerCamelCase = variance * variance_noise
__lowerCamelCase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=lowerCamelCase__ , pred_original_sample=lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> torch.FloatTensor:
'''simple docstring'''
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
__lowerCamelCase = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
__lowerCamelCase = timesteps.to(original_samples.device )
__lowerCamelCase = alphas_cumprod[timesteps] ** 0.5
__lowerCamelCase = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
__lowerCamelCase = sqrt_alpha_prod.unsqueeze(-1 )
__lowerCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5
__lowerCamelCase = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
__lowerCamelCase = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
__lowerCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
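# --- Added usage sketch (not part of the original file) -----------------------
# Drives the scheduler with a dummy epsilon prediction in place of a trained
# UNet; shapes are hypothetical. Runs only inside the diffusers package context
# because of the relative imports above.
if __name__ == "__main__":
    scheduler = UnCLIPScheduler()
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.zeros_like(sample)  # stand-in for a UNet epsilon prediction
        sample = scheduler.step(model_output, t, sample).prev_sample
    print(sample.shape)  # torch.Size([1, 3, 8, 8])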
| 348
|
import requests
__A = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> None:
"""simple docstring"""
__lowerCamelCase = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(F"""{i}.) {article['title']}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 348
| 1
|
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
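# --- Added illustration (not part of the original file) -----------------------
# The _LazyModule above defers the heavy torch imports until an attribute is
# first accessed. A stripped-down equivalent using PEP 562 module __getattr__:
#
#     import importlib
#
#     def __getattr__(name):
#         for module_name, names in _import_structure.items():
#             if name in names:
#                 module = importlib.import_module("." + module_name, __name__)
#                 return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")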
| 129
|
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    '''simple docstring'''

    def __init__(self, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        if self.framework != "pt":
            raise ValueError(f"""The {self.__class__} is only available in PyTorch.""")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        """simple docstring"""
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        """simple docstring"""
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        """simple docstring"""
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        """simple docstring"""
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        """simple docstring"""
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
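# --- Added usage sketch (not part of the original file) -----------------------
# The pipeline is normally built through the `pipeline` factory; the checkpoint
# name and labels below are illustrative:
#
#     from transformers import pipeline
#
#     classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#     output = classifier("audio.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
#     # -> [{"score": ..., "label": "Sound of a dog"}, ...]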
| 129
| 1
|
"""simple docstring"""
from torch import nn
class ClassificationHead(nn.Module):
    '''simple docstring'''

    def __init__(self, class_size, embed_size):
        '''simple docstring'''
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        '''simple docstring'''
        logits = self.mlp(hidden_state)
        return logits
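# --- Added sanity check (not part of the original file) -----------------------
# The head maps pooled hidden states of width `embed_size` to `class_size`
# logits; the sizes below are hypothetical.
if __name__ == "__main__":
    import torch

    head = ClassificationHead(class_size=5, embed_size=768)
    hidden_state = torch.randn(2, 768)  # batch of 2 pooled representations
    print(head(hidden_state).shape)  # torch.Size([2, 5])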
| 369
|
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """simple docstring"""
    if isinstance(number_of_qubits, str):
        raise TypeError("""number of qubits must be an integer.""")
    if number_of_qubits <= 0:
        raise ValueError("""number of qubits must be > 0.""")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("""number of qubits must be exact integer.""")
    if number_of_qubits > 10:
        raise ValueError("""number of qubits too large to simulate(>10).""")

    qr = QuantumRegister(number_of_qubits, """qr""")
    cr = ClassicalRegister(number_of_qubits, """cr""")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("""qasm_simulator""")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
F'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
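# --- Added sanity note (not part of the original file) ------------------------
# For the all-zero input state the QFT produces a uniform superposition, so the
# measured counts should spread roughly evenly over all 2**n bitstrings:
#
#     counts = quantum_fourier_transform(3)
#     assert set(counts) <= {f"{i:03b}" for i in range(8)}
#     # each of the eight outcomes should appear close to 10_000 / 8 = 1_250 times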
| 132
| 0
|
def partition(m: int) -> int:
    '''simple docstring'''
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
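# --- Added worked example (not part of the original file) ---------------------
# partition(5) == 7, matching the seven integer partitions of 5:
#     5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1
#
#     assert partition(5) == 7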
| 29
|
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 10_00,
"megajoule": 1_00_00_00,
"gigajoule": 10_00_00_00_00,
"wattsecond": 1.0,
"watthour": 36_00,
"kilowatthour": 3_60_00_00,
"newtonmeter": 1.0,
"calorie_nutr": 41_86.8,
"kilocalorie_nutr": 4_18_68_00.00,
"electronvolt": 1.602_176_634e-19,
"britishthermalunit_it": 10_55.0_55_85,
"footpound": 1.35_5818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            f"""Valid values are: {', '.join(ENERGY_CONVERSION)}"""
        )
        raise ValueError(msg)
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
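# --- Added worked examples (not part of the original file) --------------------
# The table is joule-based, so conversion is value * from_factor / to_factor:
#
#     >>> energy_conversion("joule", "kilojoule", 1_000)
#     1.0
#     >>> energy_conversion("kilowatthour", "joule", 1)
#     3600000.0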
| 59
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 353
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
SPIECE_UNDERLINE = '▁'
class AlbertTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", "\"").replace("''", "\"")

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
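# --- Added usage sketch (not part of the original file) -----------------------
# Requires the transformers package context for the relative imports above; the
# checkpoint name is illustrative:
#
#     tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#     tokens = tokenizer.tokenize("SentencePiece keeps '▁' word boundaries.")
#     ids = tokenizer.build_inputs_with_special_tokens(tokenizer.convert_tokens_to_ids(tokens))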
| 325
| 0
|
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    """simple docstring"""

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        """simple docstring"""
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        """simple docstring"""
        logger.info('''initializing retrieval''')

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info('''dist initialized''')
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend='''gloo''')

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info('''dist not initialized / main''')
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        """simple docstring"""
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        """simple docstring"""
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        """simple docstring"""
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith('''e''')), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int):
        """simple docstring"""
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            doc_ids, retrieved_doc_embeds = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            doc_ids, retrieved_doc_embeds = torch.tensor(doc_ids), torch.tensor(retrieved_doc_embeds)
            scatter_ids = self._chunk_tensor(doc_ids, n_queries)
            scatter_vectors = self._chunk_tensor(retrieved_doc_embeds, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
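# --- Added communication sketch (not part of the original file) ---------------
# retrieve() above follows a gather -> retrieve-on-rank-0 -> scatter pattern.
# With plain torch.distributed primitives (all names hypothetical):
#
#     gather_list = [torch.empty_like(q) for _ in range(world_size)] if rank == 0 else None
#     dist.gather(q, dst=0, gather_list=gather_list, group=group)      # workers -> rank 0
#     # ... rank 0 runs the single index lookup and chunks the results ...
#     out = torch.empty(per_rank_shape, dtype=torch.int64)
#     dist.scatter(out, src=0, scatter_list=chunks, group=group)       # rank 0 -> workers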
| 303
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 303
| 1
|
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model) -> None:
    '''simple docstring'''
    config = FunnelConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--base_model""", action="""store_true""", help="""Whether you want just the base model (no decoder) or not."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 363
|
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)
DatasetType = TypeVar("""DatasetType""", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    '''simple docstring'''
    if not datasets:
        raise ValueError("""Unable to interleave an empty list of datasets.""")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        """is an empty dataset dictionary.""" )
                raise ValueError(
                    f"""Dataset at position {i} has at least one split: {list(dataset)}\n"""
                    f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']""" )
            raise ValueError(
                f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.""" )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    '''simple docstring'''
    if not dsets:
        raise ValueError("""Unable to concatenate an empty list of datasets.""")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        """is an empty dataset dictionary.""" )
                raise ValueError(
                    f"""Dataset at position {i} has at least one split: {list(dataset)}\n"""
                    f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']""" )
            raise ValueError(
                f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.""" )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
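# --- Added usage sketch (not part of the original file) -----------------------
# Interleaving alternates rows across datasets; concatenation appends them:
#
#     from datasets import Dataset
#
#     d1 = Dataset.from_dict({"a": [0, 1, 2]})
#     d2 = Dataset.from_dict({"a": [10, 11, 12]})
#     interleave_datasets([d1, d2])["a"]   # [0, 10, 1, 11, 2, 12]
#     concatenate_datasets([d1, d2])["a"]  # [0, 1, 2, 10, 11, 12]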
| 165
| 0
|
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    '''simple docstring'''
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''')

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps=1000,
        variance_type="fixed_small_log",
        clip_sample=True,
        clip_sample_range=1.0,
        prediction_type="epsilon",
        beta_schedule="squaredcos_cap_v2",
    ):
        '''simple docstring'''
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2' ''')

        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample, timestep=None) -> torch.FloatTensor:
        '''simple docstring'''
        return sample

    def set_timesteps(self, num_inference_steps, device=None):
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        '''simple docstring'''
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output,
        timestep,
        sample,
        prev_timestep=None,
        generator=None,
        return_dict=True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        '''simple docstring'''
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
                ''' for the UnCLIPScheduler.''' )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device )

            variance = self._get_variance(
                t, predicted_variance=predicted_variance, prev_timestep=prev_timestep, )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
                    ''' for the UnCLIPScheduler.''' )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(self, original_samples, noise, timesteps) -> torch.FloatTensor:
        '''simple docstring'''
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
| 348
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 348
| 1
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    return word_by_signature[signature(my_word)]


data: str = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list: list[str] = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature: dict[str, list[str]] = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open('anagrams.txt', 'w') as file:
        file.write('all_anagrams = \n ')
        file.write(pprint.pformat(all_anagrams))
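# --- Added worked example (not part of the original file) ---------------------
# Words are grouped by their sorted-letter signature, so "listen" and "silent"
# both map to "eilnst" and land in the same anagram bucket:
#
#     >>> signature("listen")
#     'eilnst'
#     >>> "silent" in anagram("listen")   # given both words appear in words.txt
#     True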
| 359
|
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    '''simple docstring'''

    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    '''simple docstring'''

    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn='gelu', attention_bias=True)
                for _ in range(num_layers)
            ] )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
| 210
| 0
|