| code (string, 82–53.2k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
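
# A minimal usage sketch for the encoder above (illustrative only: the restored
# names follow the code as de-obfuscated here, and every hyperparameter value
# below is an assumption, not taken from any released config; the module's
# relative imports mean this cannot run standalone, so it is left commented):
#
# encoder = SpectrogramNotesEncoder(
#     max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
#     num_layers=2, num_heads=12, d_kv=64, d_ff=2048,
#     feed_forward_proj="gated-gelu",
# )
# tokens = torch.randint(0, 1536, (1, 16))
# mask = torch.ones(1, 16, dtype=torch.long)
# hidden, out_mask = encoder(tokens, mask)  # hidden: (1, 16, 768)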
| 399 |
"""
Implementation of an auto-balanced binary tree (AVL tree).
"""
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])


class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height


def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b


def right_rotation(node: MyNode) -> MyNode:
    print("left rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    print("right rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)


def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an imbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    return node


def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()


def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root


class AVLtree:
    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level traversal, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output


def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
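
# Illustrative check, not part of the original module: every node of an AVL
# tree must satisfy the balance property, i.e. the heights of its left and
# right subtrees differ by at most one. The helper and the sample values below
# are additions for demonstration.
def is_balanced(node: MyNode | None) -> bool:
    if node is None:
        return True
    if abs(get_height(node.get_left()) - get_height(node.get_right())) > 1:
        return False
    return is_balanced(node.get_left()) and is_balanced(node.get_right())


if __name__ == "__main__":
    balanced_tree = AVLtree()
    for value in [3, 1, 4, 2, 5, 9, 7, 6]:
        balanced_tree.insert(value)
    assert is_balanced(balanced_tree.root)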
| 399 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
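
# For context, a minimal stand-alone sketch of the same lazy-import idea using
# PEP 562 module-level __getattr__ (illustrative only; transformers implements
# this with its own _LazyModule class, and "math_utils"/"sqrt" are made-up
# names — left commented so it does not clash with the module above):
#
# import importlib
#
# _import_structure = {"math_utils": ["sqrt"]}
#
# def __getattr__(name):
#     for module_name, names in _import_structure.items():
#         if name in names:
#             module = importlib.import_module("." + module_name, __name__)
#             return getattr(module, name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")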
| 714 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 648 | 0 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' ,add_special_tokens=snake_case__ ) ,[0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' ,add_special_tokens=snake_case__ ) ,[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] ,)
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.encode('sequence builders' ,add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.encode('multi-sequence build' ,add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.encode(
'sequence builders' ,add_special_tokens=snake_case__ ,add_prefix_space=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.encode(
'sequence builders' ,'multi-sequence build' ,add_special_tokens=snake_case__ ,add_prefix_space=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.build_inputs_with_special_tokens(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.build_inputs_with_special_tokens(snake_case__ ,snake_case__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Dict = 'Encode this sequence.'
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
SCREAMING_SNAKE_CASE_ : int = tokenizer.encode(snake_case__ ,add_special_tokens=snake_case__ ,add_prefix_space=snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.encode(snake_case__ ,add_special_tokens=snake_case__ ,add_prefix_space=snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(snake_case__ ,snake_case__ )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.encode(snake_case__ ,add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(snake_case__ ,snake_case__ )
# Testing spaces after special tokens
SCREAMING_SNAKE_CASE_ : Optional[int] = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(snake_case__ ,lstrip=snake_case__ ,rstrip=snake_case__ )} ) # mask token has a left space
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.convert_tokens_to_ids(snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'Encode <mask> sequence'
SCREAMING_SNAKE_CASE_ : List[Any] = 'Encode <mask>sequence'
SCREAMING_SNAKE_CASE_ : Any = tokenizer.encode(snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = encoded.index(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.encode(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = encoded.index(snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(snake_case__ ,snake_case__ )
def snake_case ( self ):
pass
def snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = 'A, <mask> AllenNLP sentence.'
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_r.encode_plus(snake_case__ ,add_special_tokens=snake_case__ ,return_token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_p.encode_plus(snake_case__ ,add_special_tokens=snake_case__ ,return_token_type_ids=snake_case__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) ,sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) ,sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) ,)
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] ,[0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] ,[0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
snake_case__ ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
snake_case__ ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def snake_case ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] ,repeat=2 ):
SCREAMING_SNAKE_CASE_ : int = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname ,use_fast=snake_case__ ,add_prefix_space=snake_case__ ,trim_offsets=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
SCREAMING_SNAKE_CASE_ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] ,snake_case__ )
self.assertEqual(post_processor_state['add_prefix_space'] ,snake_case__ )
self.assertEqual(post_processor_state['trim_offsets'] ,snake_case__ )
def snake_case ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : List[str] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE_ : Dict = F'{text_of_1_token} {text_of_1_token}'
SCREAMING_SNAKE_CASE_ : Tuple = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,add_prefix_space=snake_case__ ,trim_offsets=snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(snake_case__ ) + 1, len(snake_case__ ) + 1 + len(snake_case__ )) ,)
SCREAMING_SNAKE_CASE_ : Any = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,add_prefix_space=snake_case__ ,trim_offsets=snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(snake_case__ ) + 1, len(snake_case__ ) + 1 + len(snake_case__ )) ,)
SCREAMING_SNAKE_CASE_ : Dict = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,add_prefix_space=snake_case__ ,trim_offsets=snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(snake_case__ ), len(snake_case__ ) + 1 + len(snake_case__ )) ,)
SCREAMING_SNAKE_CASE_ : int = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,add_prefix_space=snake_case__ ,trim_offsets=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(snake_case__ ), len(snake_case__ ) + 1 + len(snake_case__ )) ,)
SCREAMING_SNAKE_CASE_ : List[Any] = F' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
SCREAMING_SNAKE_CASE_ : Any = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,add_prefix_space=snake_case__ ,trim_offsets=snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(snake_case__ ) + 1, 1 + len(snake_case__ ) + 1 + len(snake_case__ )) ,)
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,add_prefix_space=snake_case__ ,trim_offsets=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(snake_case__ ), 1 + len(snake_case__ ) + 1 + len(snake_case__ )) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,add_prefix_space=snake_case__ ,trim_offsets=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(snake_case__ ), 1 + len(snake_case__ ) + 1 + len(snake_case__ )) ,)
| 105 |
"""
Find the number of ways that a given integer can be expressed as the sum of
the n-th powers of unique, natural numbers.
"""
from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
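
    # Illustrative calls to the solver above; both results can be checked by
    # hand against the definition:
    print(solve(13, 2))  # 1  -> 2^2 + 3^2
    print(solve(100, 2))  # 3  -> 10^2; 6^2 + 8^2; 1^2 + 3^2 + 4^2 + 5^2 + 7^2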
| 527 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
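
# Hypothetical programmatic use of the converter above; all paths are
# placeholders, not real checkpoints (left commented so it does not run on
# import):
#
# convert_wav2vec2_conformer_checkpoint(
#     checkpoint_path="/path/to/checkpoint_best.pt",
#     pytorch_dump_folder_path="/path/to/hf_model",
#     config_path=None,
#     dict_path="/path/to/dict.ltr.txt",
#     is_finetuned=True,
# )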
| 622 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
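
if __name__ == "__main__":
    # Illustrative use of padding_tensor above (not part of the original
    # module): right-pad two tag sequences to length 5 with fill value -1.
    print(padding_tensor([[1, 2, 3], [4]], -1, "right", 5))
    # -> [[1, 2, 3, -1, -1], [4, -1, -1, -1, -1]]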
| 622 | 1 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCAmelCase = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
lowerCAmelCase = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
lowerCAmelCase = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[List[str]]],
        references: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
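# Example invocation (the script filename and paths are placeholders for
# illustration):
#
#     python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./bigbird_ckpt \
#         --big_bird_config_file ./big_bird_config.json \
#         --pytorch_dump_path ./pytorch_model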
import math_equivalence  # From: git+https://github.com/hendrycks/math.git
import datasets


_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'

_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'

_KWARGS_DESCRIPTION = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
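# A minimal usage sketch (requires the `math_equivalence` dependency noted in
# the import above; the strings mirror the doctest in _KWARGS_DESCRIPTION):
#
#     import datasets
#
#     metric = datasets.load_metric("competition_math")
#     results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
#     assert results["accuracy"] == 1.0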
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
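# A tiny, self-contained sketch of the thresholding rule used above, with
# made-up numbers (not real concept embeddings or learned thresholds): an image
# is flagged when any `cosine_similarity - threshold + adjustment` is positive.
def _demo_nsfw_thresholding() -> bool:
    cos_scores = [0.12, 0.31, 0.05]  # hypothetical cosine similarities
    thresholds = [0.20, 0.28, 0.30]  # hypothetical per-concept thresholds
    adjustment = 0.0
    concept_scores = [round(c - t + adjustment, 3) for c, t in zip(cos_scores, thresholds)]
    return any(score > 0 for score in concept_scores)  # True here: 0.31 - 0.28 > 0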
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
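# A quick check of the key renaming above (runs against the function as
# defined; the input keys are representative fairseq-style names):
def _demo_rename_keys() -> None:
    assert rename_keys("transformer.layers.0.linear1.weight") == "model.decoder.layers.0.fc1.weight"
    assert rename_keys("emb.weight") == "model.decoder.embed_tokens.weight"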
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj into separate q/k/v projections
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
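# Example invocation (the script filename and output path are placeholders for
# illustration):
#
#     python convert_musicgen_transformers.py --checkpoint small --pytorch_dump_folder ./musicgen-small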
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
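# Example invocations once `datasets` is installed (subcommand names come from
# the commands registered above):
#
#     datasets-cli env
#     datasets-cli test ./my_dataset --save_infos --all_configs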
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase ):
# bit count represents no. of bits in the gray code
if bit_count < 0:
raise ValueError('The given input must be positive' )
# get the generated string sequence
__lowerCAmelCase : Optional[int] = gray_code_sequence_string(SCREAMING_SNAKE_CASE_ )
#
# convert them to integers
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
__lowerCAmelCase : str = int(sequence[i] , 2 )
return sequence
def __lowerCAmelCase (_UpperCamelCase ):
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__lowerCAmelCase : List[Any] = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__lowerCAmelCase : Optional[int] = gray_code_sequence_string(bit_count - 1 )
__lowerCAmelCase : Any = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__lowerCAmelCase : Optional[Any] = '0' + smaller_sequence[i]
sequence.append(SCREAMING_SNAKE_CASE_ )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__lowerCAmelCase : Any = '1' + smaller_sequence[i]
sequence.append(SCREAMING_SNAKE_CASE_ )
return sequence
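# A quick worked example of the reflect-and-prefix construction above: for
# bit_count = 2 the string sequence is ["00", "01", "11", "10"], which
# gray_code converts to [0, 1, 3, 2]; consecutive values differ in exactly one
# bit, which is the defining property of a Gray code.
def _demo_gray_code() -> None:
    sequence = gray_code(2)
    assert sequence == [0, 1, 3, 2]
    # every adjacent pair differs by a single bit
    assert all(bin(a ^ b).count("1") == 1 for a, b in zip(sequence, sequence[1:]))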
if __name__ == "__main__":
import doctest
    doctest.testmod()
"""simple docstring"""
# Imports
import numpy as np
class A__ :
    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ):
self.set_matricies(red=_SCREAMING_SNAKE_CASE , green=_SCREAMING_SNAKE_CASE , blue=_SCREAMING_SNAKE_CASE , red_edge=_SCREAMING_SNAKE_CASE , nir=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!' )
return False
def __lowerCamelCase ( self ):
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def __lowerCamelCase ( self ):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def __lowerCamelCase ( self ):
return self.nir * (self.red / (self.green**2))
def __lowerCamelCase ( self ):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def __lowerCamelCase ( self ):
return (self.nir - self.red) / (self.nir + self.red)
def __lowerCamelCase ( self ):
return (self.nir - self.blue) / (self.nir + self.blue)
def __lowerCamelCase ( self ):
return (self.redEdge - self.red) / (self.redEdge + self.red)
def __lowerCamelCase ( self ):
return (self.nir - self.green) / (self.nir + self.green)
def __lowerCamelCase ( self ):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def __lowerCamelCase ( self ):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def __lowerCamelCase ( self ):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def __lowerCamelCase ( self ):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
    def atsavi(self, a=0.08, b=1.22, x=0.03):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def __lowerCamelCase ( self ):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def __lowerCamelCase ( self ):
return (self.nir / self.green) - 1
def __lowerCamelCase ( self ):
return (self.nir / self.redEdge) - 1
def __lowerCamelCase ( self ):
return (self.red - self.blue) / self.red
def __lowerCamelCase ( self ):
        ndvi = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def __lowerCamelCase ( self ):
return self.nir - self.green
def __lowerCamelCase ( self ):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def __lowerCamelCase ( self ):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
    def gosavi(self, y=0.16):
return (self.nir - self.green) / (self.nir + self.green + y)
    def gsavi(self, n=0.5):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def __lowerCamelCase ( self ):
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
    def ivi(self, a=None, b=None):
return (self.nir - b) / (a * self.red)
def __lowerCamelCase ( self ):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def __lowerCamelCase ( self ):
return (self.red + self.green + self.blue) / 30.5
def __lowerCamelCase ( self ):
return self.nir / self.red
def __lowerCamelCase ( self ):
return (self.rvi() - 1) / (self.rvi() + 1)
def __lowerCamelCase ( self ):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def __lowerCamelCase ( self ):
return self.green / (self.nir + self.red + self.green)
def __lowerCamelCase ( self ):
return self.nir / (self.nir + self.red + self.green)
def __lowerCamelCase ( self ):
return self.red / (self.nir + self.red + self.green)
def __lowerCamelCase ( self ):
return (self.green - self.red) / (self.green + self.red)
def __lowerCamelCase ( self ):
return (self.red - self.green) / (self.red + self.green)
def __lowerCamelCase ( self ):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
return (max_value - min_value) / max_value
def __lowerCamelCase ( self ):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def __lowerCamelCase ( self ):
return self.nir / self.red
def __lowerCamelCase ( self ):
return (self.ndvi() + 0.5) ** (1 / 2)
def __lowerCamelCase ( self ):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
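# A small, self-contained sketch of the NDVI formula used above,
# (nir - red) / (nir + red); the band values are made up for illustration and
# rely on the `numpy` import at the top of this file.
def _demo_ndvi():
    red = np.array([[0.1, 0.2], [0.3, 0.4]])
    nir = np.array([[0.5, 0.6], [0.7, 0.8]])
    ndvi = (nir - red) / (nir + red)
    return ndvi  # values close to 1 indicate dense, healthy vegetation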
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results: str):
    expressions = test_results.split(" ")
    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
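# A worked example of the parser above on a typical pytest summary line: the
# counts are read from the token immediately before "failed"/"passed", and the
# runtime is taken from the second-to-last token when the line ends with "=".
def _demo_handle_test_results() -> None:
    failed, success, time_spent = handle_test_results("= 2 failed, 10 passed in 30.00s =")
    assert (failed, success, time_spent) == (2, 10, "30.00s")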
def extract_first_line_failure(failures_short_lines: str):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
__snake_case = title
__snake_case = doc_test_results['time_spent'].split(',' )[0]
__snake_case = doc_test_results['success']
__snake_case = doc_test_results['failures']
__snake_case = self.n_success + self.n_failures
# Failures and success of the modeling tests
__snake_case = doc_test_results
@property
def a ( self : List[str] ) -> str:
__snake_case = [self._time_spent]
__snake_case = 0
for time in time_spent:
__snake_case = time.split(':' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(SCREAMING_SNAKE_CASE_ ) == 1:
__snake_case = [0, 0, time_parts[0]]
__snake_case , __snake_case , __snake_case = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
__snake_case , __snake_case , __snake_case = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return f'{int(SCREAMING_SNAKE_CASE_ )}h{int(SCREAMING_SNAKE_CASE_ )}m{int(SCREAMING_SNAKE_CASE_ )}s'
@property
def a ( self : Any ) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def a ( self : Tuple ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def a ( self : List[Any] ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
f' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def a ( self : Optional[int] ) -> Dict:
__snake_case = 40
__snake_case = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}
__snake_case = ''
for category, failures in category_failures.items():
if len(SCREAMING_SNAKE_CASE_ ) == 0:
continue
if report != "":
report += "\n\n"
report += f'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(SCREAMING_SNAKE_CASE_ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def a ( self : Union[str, Any] ) -> str:
__snake_case = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(SCREAMING_SNAKE_CASE_ )
@staticmethod
def a ( ) -> Union[str, Any]:
__snake_case = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(SCREAMING_SNAKE_CASE_ )} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=SCREAMING_SNAKE_CASE_ , )
def a ( self : Optional[Any] ) -> Dict:
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(self.payload )} ) )
__snake_case = f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else 'All tests passed.'
__snake_case = client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=SCREAMING_SNAKE_CASE_ , )
def a ( self : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> int:
__snake_case = ''
for key, value in failures.items():
__snake_case = value[:200] + ' [Truncated]' if len(SCREAMING_SNAKE_CASE_ ) > 250 else value
failures_text += f'*{key}*\n_{value}_\n\n'
__snake_case = job_name
__snake_case = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
if job_link is not None:
__snake_case = {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
'url': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def a ( self : int ) -> Dict:
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.' )
__snake_case = self.doc_test_results.pop('job_link' )
self.doc_test_results.pop('failures' )
self.doc_test_results.pop('success' )
self.doc_test_results.pop('time_spent' )
__snake_case = sorted(self.doc_test_results.items() , key=lambda SCREAMING_SNAKE_CASE_ : t[0] )
for job, job_result in sorted_dict:
if len(job_result['failures'] ):
__snake_case = f'*Num failures* :{len(job_result["failed"] )} \n'
__snake_case = job_result['failures']
__snake_case = self.get_reply_blocks(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , text=SCREAMING_SNAKE_CASE_ )
print('Sending the following reply' )
print(json.dumps({'blocks': blocks} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=f'Results for {job}' , blocks=SCREAMING_SNAKE_CASE_ , thread_ts=self.thread_ts['ts'] , )
time.sleep(1 )
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
A = importlib.util.find_spec('s3fs') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
A = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: str ):
"""simple docstring"""
if "://" in dataset_path:
snake_case : List[str] = dataset_path.split("://" )[1]
return dataset_path
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: fsspec.AbstractFileSystem ):
"""simple docstring"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: fsspec.AbstractFileSystem , lowerCamelCase_: str , lowerCamelCase_: str ):
"""simple docstring"""
snake_case : int = not is_remote_filesystem(lowerCamelCase_ )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(lowerCamelCase_ ) , fs._strip_protocol(lowerCamelCase_ ) )
else:
fs.mv(lowerCamelCase_ , lowerCamelCase_ , recursive=lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
if hasattr(fsspec.asyn , "reset_lock" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
snake_case : Union[str, Any] = None
snake_case : Optional[int] = None
snake_case : List[str] = threading.Lock()
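# A quick sanity check of the URI helper above (runs against the function as
# defined; the bucket name is a placeholder):
def _demo_extract_path_from_uri() -> None:
    assert extract_path_from_uri("s3://my-bucket/data/train.csv") == "my-bucket/data/train.csv"
    assert extract_path_from_uri("relative/local/path") == "relative/local/path"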
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = 42
_UpperCAmelCase = 42
def __init__( self ,_A ,_A ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A ,scheduler=_A )
@torch.no_grad()
def __call__( self ,_A = 1 ,_A = 50 ,_A = None ,_A = "pil" ,_A = True ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Dict = self.unet.config.sample_size
_lowerCAmelCase : List[Any] = (batch_size, 3, img_size, img_size)
_lowerCAmelCase : List[str] = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
_lowerCAmelCase : Optional[Any] = randn_tensor(_A ,generator=_A ,device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(_A )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
_lowerCAmelCase : Tuple = self.scheduler.schedule[t]
_lowerCAmelCase : Union[str, Any] = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
_lowerCAmelCase, _lowerCAmelCase : Any = self.scheduler.add_noise_to_input(_A ,_A ,generator=_A )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
_lowerCAmelCase : List[str] = (sigma_hat / 2) * model((sample_hat + 1) / 2 ,sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
_lowerCAmelCase : Any = self.scheduler.step(_A ,_A ,_A ,_A )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
_lowerCAmelCase : List[Any] = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 ,sigma_prev / 2 ).sample
_lowerCAmelCase : Any = self.scheduler.step_correct(
_A ,_A ,_A ,_A ,step_output.prev_sample ,step_output['derivative'] ,)
_lowerCAmelCase : List[Any] = step_output.prev_sample
_lowerCAmelCase : Any = (sample / 2 + 0.5).clamp(0 ,1 )
_lowerCAmelCase : str = sample.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
_lowerCAmelCase : Dict = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
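# A minimal usage sketch (the checkpoint id is an assumption for illustration;
# any UNet2DModel trained for VE diffusion plus a KarrasVeScheduler will do):
#
#     from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
#
#     unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")  # hypothetical choice
#     scheduler = KarrasVeScheduler()
#     pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
#     image = pipe(num_inference_steps=50).images[0]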
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
def __init__( self ,_A ,_A=False ,_A=True ,_A=False ,_A="<s>" ,_A="</s>" ,_A="<unk>" ,_A="<sep>" ,_A="<pad>" ,_A="<cls>" ,_A="<mask>" ,_A=["<eop>", "<eod>"] ,_A = None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else mask_token
_lowerCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A ,remove_space=_A ,keep_accents=_A ,bos_token=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,additional_special_tokens=_A ,sp_model_kwargs=self.sp_model_kwargs ,**_A ,)
_lowerCAmelCase : int = 3
_lowerCAmelCase : Union[str, Any] = do_lower_case
_lowerCAmelCase : Dict = remove_space
_lowerCAmelCase : int = keep_accents
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.__dict__.copy()
_lowerCAmelCase : List[str] = None
return state
def __setstate__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_lowerCAmelCase : Union[str, Any] = {}
_lowerCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.remove_space:
_lowerCAmelCase : str = ' '.join(inputs.strip().split() )
else:
_lowerCAmelCase : Dict = inputs
_lowerCAmelCase : List[str] = outputs.replace('``' ,'"' ).replace('\'\'' ,'"' )
if not self.keep_accents:
_lowerCAmelCase : Optional[Any] = unicodedata.normalize('NFKD' ,_A )
_lowerCAmelCase : Dict = ''.join([c for c in outputs if not unicodedata.combining(_A )] )
if self.do_lower_case:
_lowerCAmelCase : Tuple = outputs.lower()
return outputs
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.preprocess_text(_A )
_lowerCAmelCase : int = self.sp_model.encode(_A ,out_type=_A )
_lowerCAmelCase : int = []
for piece in pieces:
if len(_A ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
_lowerCAmelCase : Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_A ,'' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase : int = cur_pieces[1:]
else:
_lowerCAmelCase : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_A )
else:
new_pieces.append(_A )
return new_pieces
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.PieceToId(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.IdToPiece(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ''.join(_A ).replace(_A ,' ' ).strip()
return out_string
def __lowerCamelCase ( self ,_A ,_A = False ,_A = None ,_A = True ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Dict = kwargs.pop('use_source_tokenizer' ,_A )
_lowerCAmelCase : Dict = self.convert_ids_to_tokens(_A ,skip_special_tokens=_A )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
_lowerCAmelCase : Tuple = []
sub_texts.append(_A )
else:
current_sub_text.append(_A )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_lowerCAmelCase : List[Any] = ''.join(_A )
_lowerCAmelCase : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase : int = self.clean_up_tokenization(_A )
return clean_text
else:
return text
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowerCamelCase ( self ,_A ,_A = None ,_A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A ,token_ids_a=_A ,already_has_special_tokens=_A )
if token_ids_a is not None:
return ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1, 1]
return ([0] * len(_A )) + [1, 1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_A )
elif not os.path.isfile(self.vocab_file ):
with open(_A ,'wb' ) as fi:
_lowerCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
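# A minimal sketch of XLNet's sequence layout, which the special-token methods
# above implement (the ids below are placeholders, not real vocabulary ids):
# single sequences become `A </s> <cls>` and pairs `A </s> B </s> <cls>`, with
# the classifier token at the END rather than the start.
def _demo_xlnet_layout() -> None:
    sep, cls = [100], [101]  # hypothetical ids for </s> and <cls>
    token_ids_a, token_ids_b = [1, 2, 3], [4, 5]
    assert token_ids_a + sep + cls == [1, 2, 3, 100, 101]
    assert token_ids_a + sep + token_ids_b + sep + cls == [1, 2, 3, 100, 4, 5, 100, 101]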
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=64 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=32 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=10 , UpperCAmelCase=0.02 , UpperCAmelCase=[1, 16, 4, 4] , UpperCAmelCase=None , ):
'''simple docstring'''
_lowercase = parent
_lowercase = batch_size
_lowercase = image_size
_lowercase = patch_size
_lowercase = num_channels
_lowercase = is_training
_lowercase = use_labels
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = intermediate_size
_lowercase = hidden_act
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = type_sequence_label_size
_lowercase = initializer_range
_lowercase = scope
_lowercase = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
_lowercase = (self.image_size // 32) ** 2
_lowercase = num_patches + 1
def prepare_config_and_inputs( self ):
'''simple docstring'''
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
config = self.get_config()
return config, pixel_values, labels
def get_config( self ):
'''simple docstring'''
backbone_config = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [4, 8, 16, 32],
"""num_groups""": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=backbone_config , )
def create_and_check_model( self , config , pixel_values , labels ):
'''simple docstring'''
model = ViTHybridModel(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_for_image_classification( self , config , pixel_values , labels ):
'''simple docstring'''
config.num_labels = self.type_sequence_label_size
model = ViTHybridForImageClassification(config )
model.to(torch_device )
model.eval()
result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def prepare_config_and_inputs_for_common( self ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
lowerCAmelCase__ = (
{'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _UpperCAmelCase ( self ):
'''simple docstring'''
self.model_tester = ViTHybridModelTester(self )
self.config_tester = ConfigTester(self , config_class=ViTHybridConfig , has_text_modality=False , hidden_size=37 )
def _UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def _UpperCAmelCase ( self ):
'''simple docstring'''
pass
def _UpperCAmelCase ( self ):
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def _UpperCAmelCase ( self ):
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , expected_arg_names )
def _UpperCAmelCase ( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def _UpperCAmelCase ( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
def _UpperCAmelCase ( self ):
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config )
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
backbone_params = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def _UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ViTHybridModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def prepare_img( ):
"""simple docstring"""
image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _UpperCAmelCase ( self ):
'''simple docstring'''
model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
torch_device )
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
# verify the logits
expected_shape = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = torch.tensor([-1.9_090, -0.4_993, -0.2_389] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@slow
@require_accelerate
def _UpperCAmelCase ( self ):
'''simple docstring'''
image_processor = ViTHybridImageProcessor.from_pretrained("""google/vit-hybrid-base-bit-384""" )
model = ViTHybridForImageClassification.from_pretrained("""google/vit-hybrid-base-bit-384""" , device_map="""auto""" )
image = prepare_img()
inputs = image_processor(images=image , return_tensors="""pt""" )
outputs = model(**inputs )
logits = outputs.logits
# model predicts one of the 1000 ImageNet classes
predicted_class_idx = logits.argmax(-1 ).item()
self.assertEqual(model.config.id2label[predicted_class_idx] , """tabby, tabby cat""" )
| 398 | import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowercase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = CLIPTokenizer
lowerCAmelCase__ = CLIPTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = {}
lowerCAmelCase__ = False
def _UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# fmt: off
_lowercase = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_lowercase = dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
_lowercase = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
_lowercase = {"""unk_token""": """<unk>"""}
_lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCAmelCase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase ) )
def _UpperCAmelCase ( self , **kwargs ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def _UpperCAmelCase ( self , **kwargs ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
def _UpperCAmelCase ( self , UpperCAmelCase ):
'''simple docstring'''
_lowercase = """lower newer"""
_lowercase = """lower newer"""
return input_text, output_text
def _UpperCAmelCase ( self ):
'''simple docstring'''
tokenizer = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
text = """lower newer"""
bpe_tokens = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
tokens = tokenizer.tokenize(text )
self.assertListEqual(tokens , bpe_tokens )
input_tokens = tokens + [tokenizer.unk_token]
input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@require_ftfy
def _UpperCAmelCase ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
text = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
tokens_s = tokenizer_s.tokenize(text )
tokens_r = tokenizer_r.tokenize(text )
self.assertListEqual(tokens_s , tokens_r )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
text = """xa\u0303y""" + """ """ + """x\xe3y"""
tokens_s = tokenizer_s.tokenize(text )
tokens_r = tokenizer_r.tokenize(text )
self.assertListEqual(tokens_s , tokens_r )
# Test that the tokenization is identical on unicode of space type
spaces_unicodes = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark)
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
tokens_s = tokenizer_s.tokenize(unicode_seq )
tokens_r = tokenizer_r.tokenize(unicode_seq )
self.assertListEqual(tokens_s , tokens_r )
# Test that the tokenization is identical on unicode of line break type
line_break_unicodes = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
tokens_s = tokenizer_s.tokenize(unicode_seq )
tokens_r = tokenizer_r.tokenize(unicode_seq )
self.assertListEqual(tokens_s , tokens_r )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowercase = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
_lowercase = F'''{text_of_1_token} {text_of_1_token}'''
_lowercase = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase , use_fast=UpperCAmelCase , )
_lowercase = tokenizer_r(UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCAmelCase ) + 1, len(UpperCAmelCase ) + 1 + len(UpperCAmelCase )) , )
_lowercase = F''' {text}'''
_lowercase = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase , use_fast=UpperCAmelCase , )
_lowercase = tokenizer_r(UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCAmelCase ) + 1, 1 + len(UpperCAmelCase ) + 1 + len(UpperCAmelCase )) , )
def _UpperCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(UpperCAmelCase ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def _UpperCAmelCase ( self ):
'''simple docstring'''
super().test_tokenization_python_rust_equals()
def _UpperCAmelCase ( self ):
'''simple docstring'''
pass
| 398 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key( name ):
'''simple docstring'''
if "img_encoder.pos_embed" in name:
name = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
name = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
name = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
name = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
name = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
name = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
name = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
name = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
name = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
name = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
name = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
name = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
name = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
name = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
name = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
name = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
name = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
name = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
name = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
name = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
name = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
name = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
name = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
name = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
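# Worked example (added for illustration, traced through the replacements above):
# rename_key("img_encoder.layers.0.blocks.0.norm1.weight")
# returns "vision_model.encoder.stages.0.layers.0.layer_norm1.weight".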
def convert_state_dict( orig_state_dict , config ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
val = orig_state_dict.pop(key )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
key_split = key.split("." )
_a , _a = int(key_split[2] ), int(key_split[4] )
dim = config.vision_config.hidden_size
if "weight" in key:
_a = val[:dim, :]
_a = val[dim : dim * 2, :]
_a = val[-dim:, :]
else:
_a = val[:dim]
_a = val[dim : dim * 2]
_a = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
key_split = key.split("." )
_a = int(key_split[3] )
dim = config.text_config.hidden_size
if "weight" in key:
_a = val[:dim, :]
_a = val[
dim : dim * 2, :
]
_a = val[-dim:, :]
else:
_a = val[:dim]
_a = val[dim : dim * 2]
_a = val[-dim:]
else:
new_name = rename_key(key )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
val = val.squeeze_()
orig_state_dict[new_name] = val
return orig_state_dict
def prepare_img( ):
'''simple docstring'''
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_groupvit_checkpoint( checkpoint_path , pytorch_dump_folder_path , model_name="groupvit-gcc-yfcc" , push_to_hub=False ):
'''simple docstring'''
config = GroupViTConfig()
model = GroupViTModel(config ).eval()
state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
new_state_dict = convert_state_dict(state_dict , config )
missing_keys , unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys ) == 0)
# verify result
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
image = prepare_img()
inputs = processor(text=["a photo of a cat", "a photo of a dog"] , images=image , padding=True , return_tensors="pt" )
with torch.no_grad():
outputs = model(**inputs )
if model_name == "groupvit-gcc-yfcc":
expected_res = torch.tensor([[13.35_23, 6.36_29]] )
elif model_name == "groupvit-gcc-redcaps":
expected_res = torch.tensor([[16.18_73, 8.62_30]] )
else:
raise ValueError(F"Model name {model_name} not supported." )
assert torch.allclose(outputs.logits_per_image , expected_res , atol=1E-3 )
processor.save_pretrained(pytorch_dump_folder_path )
model.save_pretrained(pytorch_dump_folder_path )
print("Successfully saved processor and model to" , pytorch_dump_folder_path )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(model_name , organization="nielsr" )
model.push_to_hub(model_name , organization="nielsr" )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
default="groupvit-gccy-fcc",
type=str,
help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
lowercase__ = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 276 |
'''simple docstring'''
MOD_ADLER = 65_521
def __UpperCamelCase ( plain_text : str ) -> int:
'''simple docstring'''
a = 1
b = 0
for plain_chr in plain_text:
a = (a + ord(plain_chr )) % MOD_ADLER
b = (b + a) % MOD_ADLER
return (b << 16) | a
| 276 | 1 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()
device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowy and bright night, with many brightly lit buildings"
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
generate_kwargs["num_inference_steps"] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
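# Hypothetical invocation (the script filename below is an assumption, not given here):
# python stable_diffusion_ipex_inference.py --dpm --steps 20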
| 136 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator: Accelerator , batch_size: int = 16):
"""simple docstring"""
tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""")
datasets = load_dataset("""glue""" , """mrpc""")
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
tokenized_datasets = datasets.map(
tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""")
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
elif accelerator.mixed_precision != "no":
pad_to_multiple_of = 8
else:
pad_to_multiple_of = None
return tokenizer.pad(
examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size , drop_last=True)
eval_dataloader = DataLoader(
tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE , drop_last=(accelerator.mixed_precision == """fp8""") , )
return train_dataloader, eval_dataloader
def training_function( config , args ):
"""simple docstring"""
# Initialize accelerator
accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["""lr"""]
num_epochs = int(config["""num_epochs"""])
seed = int(config["""seed"""])
batch_size = int(config["""batch_size"""])
metric = evaluate.load("""glue""" , """mrpc""")
# If the batch size is too big we use gradient accumulation
gradient_accumulation_steps = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
batch_size = MAX_GPU_BATCH_SIZE
set_seed(seed)
train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=True)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device)
# Instantiate optimizer
optimizer = AdamW(params=model.parameters() , lr=lr)
# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
model , optimizer , train_dataloader , eval_dataloader , lr_scheduler)
# Now we train the model
for epoch in range(num_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
outputs = model(**batch)
loss = outputs.loss
loss = loss / gradient_accumulation_steps
accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
predictions , references = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
metric.add_batch(
predictions=predictions , references=references , )
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , eval_metric)
def main( ):
"""simple docstring"""
parser = argparse.ArgumentParser(description="""Simple example of training script.""")
parser.add_argument(
"""--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""")
args = parser.parse_args()
config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(config , args)
if __name__ == "__main__":
main()
| 136 | 1 |
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A ( __snake_case ):
__magic_name__ = (DDIMParallelScheduler,)
__magic_name__ = (('''eta''', 0.0), ('''num_inference_steps''', 50))
def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
A : str = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''clip_sample''': True,
}
config.update(**__UpperCamelCase )
return config
def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
A : Any = self.scheduler_classes[0]
A : Optional[int] = self.get_scheduler_config(**__UpperCamelCase )
A : Union[str, Any] = scheduler_class(**__UpperCamelCase )
A, A : Dict = 10, 0.0
A : Union[str, Any] = self.dummy_model()
A : Tuple = self.dummy_sample_deter
scheduler.set_timesteps(__UpperCamelCase )
for t in scheduler.timesteps:
A : List[str] = model(__UpperCamelCase , __UpperCamelCase )
A : Any = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
return sample
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__UpperCamelCase )
A : str = self.scheduler_classes[0]
A : Dict = self.get_scheduler_config(steps_offset=1 )
A : List[str] = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__UpperCamelCase , beta_end=__UpperCamelCase )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__UpperCamelCase )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__UpperCamelCase )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__UpperCamelCase )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__UpperCamelCase )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
self.check_over_configs(thresholding=__UpperCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=__UpperCamelCase )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=__UpperCamelCase , num_inference_steps=__UpperCamelCase )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=__UpperCamelCase , eta=__UpperCamelCase )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : List[str] = self.scheduler_classes[0]
A : Tuple = self.get_scheduler_config()
A : Union[str, Any] = scheduler_class(**__UpperCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14_771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32_460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : Dict = self.scheduler_classes[0]
A : str = self.get_scheduler_config()
A : Optional[Any] = scheduler_class(**__UpperCamelCase )
A, A : Optional[int] = 10, 0.0
scheduler.set_timesteps(__UpperCamelCase )
A : int = self.dummy_model()
A : List[str] = self.dummy_sample_deter
A : Union[str, Any] = self.dummy_sample_deter + 0.1
A : int = self.dummy_sample_deter - 0.1
A : Union[str, Any] = samplea.shape[0]
A : List[Any] = torch.stack([samplea, samplea, samplea] , dim=0 )
A : List[str] = torch.arange(__UpperCamelCase )[0:3, None].repeat(1 , __UpperCamelCase )
A : Optional[Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
A : str = scheduler.batch_step_no_noise(__UpperCamelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , __UpperCamelCase )
A : Any = torch.sum(torch.abs(__UpperCamelCase ) )
A : List[str] = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 1_147.7_904 ) < 1e-2
assert abs(result_mean.item() - 0.4_982 ) < 1e-3
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : Any = self.full_loop()
A : Any = torch.sum(torch.abs(__UpperCamelCase ) )
A : Dict = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 172.0_067 ) < 1e-2
assert abs(result_mean.item() - 0.223_967 ) < 1e-3
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : int = self.full_loop(prediction_type='''v_prediction''' )
A : int = torch.sum(torch.abs(__UpperCamelCase ) )
A : Union[str, Any] = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 52.5_302 ) < 1e-2
assert abs(result_mean.item() - 0.0_684 ) < 1e-3
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Tuple = self.full_loop(set_alpha_to_one=__UpperCamelCase , beta_start=0.01 )
A : Tuple = torch.sum(torch.abs(__UpperCamelCase ) )
A : Dict = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 149.8_295 ) < 1e-2
assert abs(result_mean.item() - 0.1_951 ) < 1e-3
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Optional[int] = self.full_loop(set_alpha_to_one=__UpperCamelCase , beta_start=0.01 )
A : str = torch.sum(torch.abs(__UpperCamelCase ) )
A : int = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 149.0_784 ) < 1e-2
assert abs(result_mean.item() - 0.1_941 ) < 1e-3
| 702 |
'''simple docstring'''
import numpy as np
def runge_kutta( f , ya , xa , h , x_end ):
'''simple docstring'''
n = int(np.ceil((x_end - xa) / h ) )
y = np.zeros((n + 1,) )
y[0] = ya
x = xa
for k in range(n ):
k1 = f(x , y[k] )
k2 = f(x + 0.5 * h , y[k] + 0.5 * h * k1 )
k3 = f(x + 0.5 * h , y[k] + 0.5 * h * k2 )
k4 = f(x + h , y[k] + h * k3 )
y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
x += h
return y
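# Illustrative usage (the example values are mine, matching the parameter order
# restored above): integrating y' = y with y(0) = 1 over [0, 1] at step 0.01
# approximates Euler's number, e.g.
# runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1] # ~ 2.7182...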
if __name__ == "__main__":
import doctest
doctest.testmod()
| 343 | 0 |
'''simple docstring'''
def reverse_long_words( sentence: str ) -> str:
"""simple docstring"""
return " ".join(
"".join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
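# The demo call above prints "Hey fellow warriors": words longer than four
# characters ("wollef", "sroirraw") are reversed, while "Hey" is kept as-is.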
| 26 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
snake_case__ = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def rename_state_dict_key( k ):
for pegasus_name, hf_name in PATTERNS:
k = k.replace(pegasus_name , hf_name )
return k
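# Worked example (added for illustration, traced through PATTERNS above):
# rename_state_dict_key("model/encoder/layer_0/memory_attention/output_proj/kernel")
# returns "model.encoder.layers.0.encoder_attn.out_proj.weight".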
def convert_pegasus( tf_weights: dict , cfg_updates: dict ):
cfg_kwargs = DEFAULTS.copy()
cfg_kwargs.update(cfg_updates )
cfg = PegasusConfig(**cfg_kwargs )
torch_model = PegasusForConditionalGeneration(cfg )
sd = torch_model.model.state_dict()
mapping = {}
for k, v in tf_weights.items():
new_k = rename_state_dict_key(k )
if new_k not in sd:
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
if "dense" in k or "proj" in new_k:
v = v.T
mapping[new_k] = torch.tensor(v , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
# make sure embedding.padding_idx is respected
mapping['''shared.weight'''][cfg.pad_token_id] = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] )
mapping['''encoder.embed_tokens.weight'''] = mapping['''shared.weight''']
mapping['''decoder.embed_tokens.weight'''] = mapping['''shared.weight''']
empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping}
mapping.update(**empty_biases )
missing , extra = torch_model.model.load_state_dict(mapping , strict=False )
unexpected_missing = [
k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
]
assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], f'''no matches found for the following tf keys {extra}'''
return torch_model
def get_tf_weights_as_numpy( path="./ckpt/aeslc/model.ckpt-32000" ):
init_vars = tf.train.list_variables(path )
tf_weights = {}
ignore_name = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(init_vars , desc='''converting tf checkpoint to dict''' ):
skip_key = any(pat in name for pat in ignore_name )
if skip_key:
continue
array = tf.train.load_variable(path , name )
tf_weights[name] = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch( ckpt_path , save_dir ):
# save tokenizer first
dataset = Path(ckpt_path ).parent.name
desired_max_model_length = task_specific_params[f'''summarization_{dataset}''']['''max_position_embeddings''']
tok = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' , model_max_length=desired_max_model_length )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(save_dir )
# convert model
tf_weights = get_tf_weights_as_numpy(ckpt_path )
cfg_updates = task_specific_params[f'''summarization_{dataset}''']
if dataset == "large":
cfg_updates['''task_specific_params'''] = task_specific_params
torch_model = convert_pegasus(tf_weights , cfg_updates )
torch_model.save_pretrained(save_dir )
sd = torch_model.state_dict()
sd.pop('''model.decoder.embed_positions.weight''' )
sd.pop('''model.encoder.embed_positions.weight''' )
torch.save(sd , Path(save_dir ) / '''pytorch_model.bin''' )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
snake_case__ = parser.parse_args()
if args.save_dir is None:
snake_case__ = Path(args.tf_ckpt_path).parent.name
snake_case__ = os.path.join("""pegasus""", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 583 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=lowercase_ )
class _a ( lowercase_ ):
'''simple docstring'''
UpperCamelCase__ = field(default="""audio-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
UpperCamelCase__ = Features({"""audio""": Audio()} )
UpperCamelCase__ = Features({"""labels""": ClassLabel} )
UpperCamelCase__ = """audio"""
UpperCamelCase__ = """labels"""
def __lowercase ( self , UpperCAmelCase_) -> str:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""")
if not isinstance(features[self.label_column] , ClassLabel):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""")
task_template = copy.deepcopy(self)
label_schema = self.label_schema.copy()
label_schema["""labels"""] = features[self.label_column]
task_template.__dict__["""label_schema"""] = label_schema
return task_template
@property
def __lowercase ( self) -> Dict[str, str]:
'''simple docstring'''
return {
self.audio_column: "audio",
self.label_column: "labels",
}
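# Hypothetical usage (the public class name `AudioClassification` and the column
# names below are assumptions for illustration): a template constructed as
# AudioClassification(audio_column="audio_path", label_column="genre") exposes
# column_mapping == {"audio_path": "audio", "genre": "labels"}.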
| 705 |
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class _a ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ = FlaxAutoencoderKL
@property
def __lowercase ( self) -> Dict:
'''simple docstring'''
lowercase__: Dict = 4
lowercase__: str = 3
lowercase__: List[Any] = (32, 32)
lowercase__: str = jax.random.PRNGKey(0)
lowercase__: Tuple = jax.random.uniform(UpperCAmelCase_ , ((batch_size, num_channels) + sizes))
return {"sample": image, "prng_key": prng_key}
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
lowercase__: Any = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
lowercase__: Any = self.dummy_input
return init_dict, inputs_dict
| 120 | 0 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class lowercase ( unittest.TestCase ):
__a = MODEL_FOR_CAUSAL_LM_MAPPING
__a = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
lowerCAmelCase__ : Optional[Any] = text_generator('''This is a test''' , do_sample=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
lowerCAmelCase__ : Optional[int] = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
lowerCAmelCase__ : Dict = text_generator('''This is a test''' , do_sample=SCREAMING_SNAKE_CASE__ , num_return_sequences=2 , return_tensors=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , [
{'''generated_token_ids''': ANY(SCREAMING_SNAKE_CASE__ )},
{'''generated_token_ids''': ANY(SCREAMING_SNAKE_CASE__ )},
] , )
lowerCAmelCase__ : Any = text_generator.model.config.eos_token_id
lowerCAmelCase__ : Union[str, Any] = '''<pad>'''
lowerCAmelCase__ : Dict = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=SCREAMING_SNAKE_CASE__ , num_return_sequences=2 , batch_size=2 , return_tensors=SCREAMING_SNAKE_CASE__ , )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , [
[
{'''generated_token_ids''': ANY(SCREAMING_SNAKE_CASE__ )},
{'''generated_token_ids''': ANY(SCREAMING_SNAKE_CASE__ )},
],
[
{'''generated_token_ids''': ANY(SCREAMING_SNAKE_CASE__ )},
{'''generated_token_ids''': ANY(SCREAMING_SNAKE_CASE__ )},
],
] , )
@require_tf
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Dict = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
lowerCAmelCase__ : Union[str, Any] = text_generator('''This is a test''' , do_sample=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
lowerCAmelCase__ : List[Any] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def lowercase_ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCAmelCase__ : str = TextGenerationPipeline(model=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
return text_generator, ["This is a test", "Another test"]
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : int = '''Hello I believe in'''
lowerCAmelCase__ : Union[str, Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase__ : Dict = text_generator(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
lowerCAmelCase__ : Union[str, Any] = text_generator(SCREAMING_SNAKE_CASE__ , stop_sequence=''' fe''' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , [{'''generated_text''': '''Hello I believe in fe'''}] )
def lowercase_ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = text_generator.model
lowerCAmelCase__ : Any = text_generator.tokenizer
lowerCAmelCase__ : List[Any] = text_generator('''This is a test''' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , [{'''generated_text''': ANY(SCREAMING_SNAKE_CASE__ )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowerCAmelCase__ : Dict = text_generator('''This is a test''' , return_full_text=SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , [{'''generated_text''': ANY(SCREAMING_SNAKE_CASE__ )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowerCAmelCase__ : Optional[Any] = pipeline(task='''text-generation''' , model=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , return_full_text=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ : List[Any] = text_generator('''This is a test''' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , [{'''generated_text''': ANY(SCREAMING_SNAKE_CASE__ )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowerCAmelCase__ : List[str] = text_generator('''This is a test''' , return_full_text=SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , [{'''generated_text''': ANY(SCREAMING_SNAKE_CASE__ )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowerCAmelCase__ : Tuple = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , [
[{'''generated_text''': ANY(SCREAMING_SNAKE_CASE__ )}, {'''generated_text''': ANY(SCREAMING_SNAKE_CASE__ )}],
[{'''generated_text''': ANY(SCREAMING_SNAKE_CASE__ )}, {'''generated_text''': ANY(SCREAMING_SNAKE_CASE__ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowerCAmelCase__ : str = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , [
[{'''generated_text''': ANY(SCREAMING_SNAKE_CASE__ )}, {'''generated_text''': ANY(SCREAMING_SNAKE_CASE__ )}],
[{'''generated_text''': ANY(SCREAMING_SNAKE_CASE__ )}, {'''generated_text''': ANY(SCREAMING_SNAKE_CASE__ )}],
] , )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase__ : Optional[Any] = text_generator('''test''' , return_full_text=SCREAMING_SNAKE_CASE__ , return_text=SCREAMING_SNAKE_CASE__ )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase__ : Tuple = text_generator('''test''' , return_full_text=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase__ : Dict = text_generator('''test''' , return_text=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowerCAmelCase__ : Optional[Any] = text_generator('''''' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , [{'''generated_text''': ANY(SCREAMING_SNAKE_CASE__ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowerCAmelCase__ : Tuple = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
lowerCAmelCase__ : Optional[int] = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
lowerCAmelCase__ : List[Any] = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def lowercase_ ( self ):
"""simple docstring"""
import torch
# Classic `model_kwargs`
lowerCAmelCase__ : Optional[int] = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloat16} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
lowerCAmelCase__ : List[Any] = pipe('''This is a test''' )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
lowerCAmelCase__ : Tuple = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloat16 )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
lowerCAmelCase__ : Optional[int] = pipe('''This is a test''' )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowerCAmelCase__ : List[Any] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.float32 )
lowerCAmelCase__ : Dict = pipe('''This is a test''' )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def lowercase_ ( self ):
"""simple docstring"""
import torch
lowerCAmelCase__ : Tuple = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.float16 )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def lowercase_ ( self ):
"""simple docstring"""
import torch
lowerCAmelCase__ : Dict = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.float16 )
pipe('''This is a test''' , do_sample=SCREAMING_SNAKE_CASE__ , top_p=0.5 )
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = '''Hello world'''
lowerCAmelCase__ : Tuple = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
lowerCAmelCase__ : Optional[int] = logging.get_logger('''transformers.generation.tf_utils''' )
else:
lowerCAmelCase__ : Optional[Any] = logging.get_logger('''transformers.generation.utils''' )
lowerCAmelCase__ : str = '''Both `max_new_tokens`''' # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(SCREAMING_SNAKE_CASE__ ) as cl:
lowerCAmelCase__ : Optional[int] = text_generator(SCREAMING_SNAKE_CASE__ , max_length=10 , max_new_tokens=1 )
self.assertIn(SCREAMING_SNAKE_CASE__ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(SCREAMING_SNAKE_CASE__ ) as cl:
lowerCAmelCase__ : Optional[int] = text_generator(SCREAMING_SNAKE_CASE__ , max_new_tokens=1 )
self.assertNotIn(SCREAMING_SNAKE_CASE__ , cl.out )
with CaptureLogger(SCREAMING_SNAKE_CASE__ ) as cl:
lowerCAmelCase__ : List[Any] = text_generator(SCREAMING_SNAKE_CASE__ , max_length=10 )
self.assertNotIn(SCREAMING_SNAKE_CASE__ , cl.out )
| 233 |
def counting_sort( collection ):
# if the collection is empty, returns empty
if collection == []:
return []
# get some information about the collection
coll_len = len(collection )
coll_max = max(collection )
coll_min = min(collection )
# create the counting array
counting_arr_length = coll_max + 1 - coll_min
counting_arr = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
# sum each position with its predecessors. now, counting_arr[i] tells
# us how many elements <= i has in the collection
for i in range(1 , counting_arr_length ):
counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
# create the output collection
ordered = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to begin, updating counting_arr
for i in reversed(range(0 , coll_len ) ):
ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def _a ( __UpperCamelCase : int ):
return "".join([chr(__UpperCamelCase ) for i in counting_sort([ord(__UpperCamelCase ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
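    # Added sanity sketch (not in the original script): counting sort also
    # handles negative inputs, because indices are offset by coll_min.
    assert counting_sort([4, 2, 2, 8, 3, 3, 1]) == [1, 2, 2, 3, 3, 4, 8]
    assert counting_sort([-5, -10, 0, -3]) == [-10, -5, -3, 0]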
| 233 | 1 |
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    # Sieve over Euler's totient: phi[i] starts at i - 1; whenever i is
    # found prime (phi[i] still equals i - 1), reduce phi for its multiples.
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
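    # Added cross-check sketch (not in the original solution); the helper name
    # `brute_force_count` is ours. It verifies the totient sieve against a
    # direct gcd count for a tiny limit.
    from math import gcd

    def brute_force_count(limit: int) -> int:
        return sum(1 for d in range(2, limit + 1) for n in range(1, d) if gcd(n, d) == 1)

    assert solution(8) == brute_force_count(8)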
| 706 |
"""simple docstring"""
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    # Lomuto-style partition around the element at left_index.
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
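# Added usage sketch (not part of the original script): the sort works in
# place over the half-open index range [left, right).
#
#     >>> data = [9, 3, 7, 1, 3]
#     >>> quick_sort_random(data, 0, len(data))
#     >>> data
#     [1, 3, 3, 7, 9]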
if __name__ == "__main__":
main() | 621 | 0 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding
    def batch_decode(self, *args, **kwargs):
        # Forwarded to the language tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwarded to the language tokenizer's decode.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
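# Added usage sketch, not part of the processor module itself. It assumes the
# "Salesforce/instructblip-flan-t5-xl" checkpoint (with its qformer_tokenizer
# subfolder) is reachable:
#
#     from PIL import Image
#     processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#     inputs = processor(images=Image.new("RGB", (224, 224)), text="What is shown?", return_tensors="pt")
#     # inputs now carries pixel_values, input_ids and qformer_input_ids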
| 72 |
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 319 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id)
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_for_token_classification(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizers
            return True

        return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 607 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowercase = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 607 | 1 |
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 268 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def relu(vector: list[float]):
    # ReLU activation applied element-wise: max(0, x) for every entry.
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 426 | 0 |
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
SCREAMING_SNAKE_CASE = 'true'
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    # returns the model pair and dataloader needed for the regression checks
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 704 | """simple docstring"""
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid, init, goal, cost, heuristic):
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
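# Added note (not part of the original script): `cell` is re-sorted on every
# iteration, so each expansion costs O(n log n). A heap keeps the same
# behaviour at O(log n) per pop (sketch, names ours):
#
#     import heapq
#     open_heap = [(f, g, x, y)]
#     f, g, x, y = heapq.heappop(open_heap)  # cheapest f = g + h first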
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
print('ACTION MAP')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 283 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")
class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data  # the value stored in this node
        self.next: Node[T] | None = None  # reference to the node below

    def __str__(self) -> str:
        return f"{self.data}"


class Stack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
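    # Added usage sketch (not part of the original module): LIFO behaviour.
    stack = Stack[int]()
    for value in (1, 2, 3):
        stack.push(value)
    assert str(stack) == "3->2->1"
    assert stack.pop() == 3 and stack.peek() == 2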
| 102 |
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def bamb(x):
    # bytes -> megabytes (rounded down)
    return int(x / 2**20)
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased", n_train: int = 320, n_val: int = 160):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 453 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_A = logging.get_logger(__name__)
_A = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="preactivation", hidden_act="relu", global_padding=None, num_groups=32, drop_path_rate=0.0, embedding_dynamic_padding=False, output_stride=32, width_factor=1, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
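# Added usage sketch (not part of the configuration module); the values are
# illustrative only:
#
#     config = BitConfig(layer_type="bottleneck", out_features=["stage1", "stage4"])
#     print(config.out_features, config.out_indices)  # ['stage1', 'stage4'] [1, 4]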
 | 403 |
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    # Expected number of distinct colours among `num_picked` drawn balls.
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"
if __name__ == "__main__":
print(solution(20)) | 403 | 1 |
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """DO NOT CHANGE. This function computes and logs the result metrics."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """DO NOT CHANGE. This function normalizes the target text."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
| 610 |
"""simple docstring"""
def base16_encode(data: bytes) -> str:
    # Turn each byte into its two-digit uppercase hexadecimal representation
    # and join everything together.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
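# Added doctest-style sketch (not in the original module):
#
#     >>> base16_encode(b"Hello World!")
#     '48656C6C6F20576F726C6421'
#     >>> base16_decode("48656C6C6F20576F726C6421")
#     b'Hello World!'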
if __name__ == "__main__":
import doctest
doctest.testmod()
| 610 | 1 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartaaTokenizer
    rust_tokenizer_class = MBartaaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = '''<s>'''
lowerCAmelCase__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(lowerCamelCase_ ) , 10_54 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_54 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = MBartaaTokenizer(lowerCamelCase_ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowerCamelCase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowerCAmelCase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowerCamelCase_ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = {'''input_ids''': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCAmelCase__ = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = tokenizer_r.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
lowerCAmelCase__ = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
lowerCAmelCase__ = tokenizer_r.from_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=True
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
lowerCAmelCase__ = tokenizer_r.from_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=False
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCAmelCase__ = tokenizer_r.from_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartaaOneToManyIntegrationTest(unittest.TestCase):
_SCREAMING_SNAKE_CASE : List[Any] = '''facebook/mbart-large-50-one-to-many-mmt'''
_SCREAMING_SNAKE_CASE : int = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
    tgt_text = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
@classmethod
    def setUpClass(cls):
"""simple docstring"""
        cls.tokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='en_XX', tgt_lang='ro_RO')
        cls.pad_token_id = 1
return cls
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_00_20 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_00_38 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
def UpperCamelCase__ ( self ):
"""simple docstring"""
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
def UpperCamelCase__ ( self ):
"""simple docstring"""
        src_text = ['''this is gunna be a long sentence ''' * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_00_53, 25_00_01] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
def UpperCamelCase__ ( self ):
"""simple docstring"""
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors='pt')
        batch["decoder_input_ids"] = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def UpperCamelCase__ ( self ):
"""simple docstring"""
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors='pt')
        batch["decoder_input_ids"] = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors='pt')
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors='pt')
        labels = targets['''input_ids''']
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCamelCase__ ( self ):
"""simple docstring"""
        inputs = self.tokenizer._build_translation_inputs(
            'A test', return_tensors='pt', src_lang='en_XX', tgt_lang='ar_AR')
        self.assertEqual(
            nested_simplify(inputs), {
# en_XX, A, test, EOS
'input_ids': [[25_00_04, 62, 30_34, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_00_01,
} , )
| 718 |
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """simple docstring"""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
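# A quick worked example (illustrative, not from the original file): pooling a 4x4
# matrix with size=2 and stride=2 keeps the maximum of each non-overlapping 2x2
# block, and the output side length is (4 - 2) // 2 + 1 = 2:
# >>> maxpooling(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]), 2, 2)
# array([[ 6.,  8.],
#        [14., 16.]])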
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """simple docstring"""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
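# Likewise for average pooling (illustrative): each 2x2 block mean is truncated to
# an int by int(np.average(...)), e.g. mean(1, 2, 5, 6) = 3.5 -> 3:
# >>> avgpooling(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]), 2, 2)
# array([[ 3.,  5.],
#        [11., 13.]])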
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="""avgpooling""", verbose=True)
# Loading the image
    image = Image.open("""path_to_image""")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 365 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        '''simple docstring'''
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        '''simple docstring'''
        pass
class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        '''simple docstring'''
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        '''simple docstring'''
        pass
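# A minimal sketch (not part of the original module) of how a concrete reader plugs
# into AbstractDatasetReader: a subclass only needs to implement `read`.
#
#   class MyJsonReader(AbstractDatasetReader):  # hypothetical name
#       def read(self):
#           # build and return a Dataset/IterableDataset from self.path_or_paths,
#           # honoring self.features, self.cache_dir, self.streaming, etc.
#           ...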
| 109 |
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
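# Note on the pagination above: the first request already returns up to 100 jobs, so
# e.g. a run with total_count == 250 needs math.ceil((250 - 100) / 100) == 2 extra
# requests, fetched as `&page=2` and `&page=3`.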
def get_artifacts_links(workflow_run_id, token=None):
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # the first request must not follow redirects, so we can read the `Location` header
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """simple docstring"""
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem.")
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result
def get_all_errors(artifact_dir, job_links=None):
    """simple docstring"""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """simple docstring"""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """simple docstring"""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test
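# For example (illustrative):
# get_model("tests/models/albert/test_modeling_albert.py::AlbertModelTest::test_config")
# returns "albert", while a test path outside tests/models/ returns None.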
def reduce_by_model(logs, error_filter=None):
    """simple docstring"""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    """simple docstring"""
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    """simple docstring"""
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
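# For illustration, with hypothetical numbers the per-model table renders as:
# | model | no. of errors | major error | count |
# |-:|-:|-:|-:|
# | albert | 3 | OSError: Can't load config for ... | 2 |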
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
'''simple docstring'''
lowercase = range(2, 20 + 1)
lowercase = [10**k for k in range(ks[-1] + 1)]
lowercase = {}
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =sum(a_i[j] for j in range(lowercase__ , len(lowercase__ ) ) )
a_ =sum(a_i[j] * base[j] for j in range(min(len(lowercase__ ) , lowercase__ ) ) )
a_ , a_ =0, 0
a_ =n - i
a_ =memo.get(lowercase__ )
if sub_memo is not None:
a_ =sub_memo.get(lowercase__ )
if jumps is not None and len(lowercase__ ) > 0:
# find and make the largest jump without going over
a_ =-1
for _k in range(len(lowercase__ ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
a_ =_k
break
if max_jump >= 0:
a_ , a_ , a_ =jumps[max_jump]
# since the difference between jumps is cached, add c
a_ =diff + c
for j in range(min(lowercase__ , len(lowercase__ ) ) ):
a_ , a_ =divmod(lowercase__ , 1_0 )
if new_c > 0:
add(lowercase__ , lowercase__ , lowercase__ )
else:
a_ =[]
else:
a_ ={c: []}
a_ =sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
a_ , a_ =next_term(lowercase__ , k - 1 , i + dn , lowercase__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
a_ , a_ =compute(lowercase__ , lowercase__ , i + dn , lowercase__ )
diff += _diff
dn += terms_jumped
a_ =sub_memo[c]
# keep jumps sorted by # of terms skipped
a_ =0
while j < len(lowercase__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(lowercase__ , (diff, dn, k) )
return (diff, dn)
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
if i >= n:
return 0, i
if k > len(lowercase__ ):
a_i.extend([0 for _ in range(k - len(lowercase__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
a_ =i
a_ , a_ , a_ =0, 0, 0
for j in range(len(lowercase__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
a_ =ds_c + ds_b
diff += addend
a_ =0
for j in range(lowercase__ ):
a_ =a_i[j] + addend
a_ , a_ =divmod(lowercase__ , 1_0 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(lowercase__ , lowercase__ , lowercase__ )
return diff, i - start_i
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
for j in range(lowercase__ , len(lowercase__ ) ):
a_ =digits[j] + addend
if s >= 1_0:
a_ , a_ =divmod(lowercase__ , 1_0 )
a_ =addend // 1_0 + quotient
else:
a_ =s
a_ =addend // 1_0
if addend == 0:
break
while addend > 0:
a_ , a_ =divmod(lowercase__ , 1_0 )
digits.append(lowercase__ )
def UpperCAmelCase_ ( lowercase__ = 1_0**1_5 ):
'''simple docstring'''
a_ =[1]
a_ =1
a_ =0
while True:
a_ , a_ =next_term(lowercase__ , 2_0 , i + dn , lowercase__ )
dn += terms_jumped
if dn == n - i:
break
a_ =0
for j in range(len(lowercase__ ) ):
a_n += digits[j] * 1_0**j
return a_n
if __name__ == "__main__":
print(F"""{solution() = }""")
| 41 |
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    '''simple docstring'''
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)
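# e.g. extended_euclid(10, 6) == (-1, 2), and indeed 10 * (-1) + 6 * 2 == 2 == gcd(10, 6)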
def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    '''simple docstring'''
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
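# e.g. chinese_remainder_theorem(5, 1, 7, 3) == 31, since 31 % 5 == 1 and 31 % 7 == 3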
def invert_modulo(a: int, n: int) -> int:
    '''simple docstring'''
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b
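# e.g. invert_modulo(2, 5) == 3, since (2 * 3) % 5 == 1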
def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    '''simple docstring'''
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
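# same result via modular inverses: chinese_remainder_theorem2(5, 1, 7, 3) == 31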
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
| 41 | 1 |
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)})
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."})
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    doc_stride: int = field(
        default=128, metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."}, )
    max_query_length: int = field(
        default=64, metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        }, )
    max_answer_length: int = field(
        default=30, metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."})
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
    lang_id: int = field(
        default=0, metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        }, )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        '''simple docstring'''
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}', )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    F'Loading features from cached file {cached_features_file} [took %.3f s]', time.time() - start)
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        F'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
                        " future run")
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format, )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples}, cached_features_file, )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]')
def __len__( self ) -> Any:
'''simple docstring'''
return len(self.features )
    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        '''simple docstring'''
        # Convert to Tensors and build a dict of model inputs
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})
        return inputs
| 178 |
"""simple docstring"""
import string
def decrypt(message: str) -> None:
    """simple docstring"""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(F'Decryption using Key #{key}: {translated}')
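# For example, decrypt("OLSSV") prints all 26 candidate shifts, including the
# correct one: "Decryption using Key #7: HELLO".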
def main() -> None:
    """simple docstring"""
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 178 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCamelCase_ : List[str] = logging.get_logger(__name__)
class __lowerCAmelCase ( _lowercase ):
"""simple docstring"""
snake_case = ["pixel_values"]
def __init__( self : Tuple , _snake_case : bool = True , _snake_case : Dict[str, int] = None , _snake_case : PILImageResampling = PIL.Image.BICUBIC , _snake_case : bool = True , _snake_case : Dict[str, int] = None , _snake_case : Union[int, float] = 1 / 255 , _snake_case : bool = True , _snake_case : bool = True , _snake_case : Optional[Union[float, List[float]]] = None , _snake_case : Optional[Union[float, List[float]]] = None , **_snake_case : Optional[Any] , ) -> None:
"""simple docstring"""
super().__init__(**_snake_case )
A_ = size if size is not None else {"height": 256, "width": 256}
A_ = get_size_dict(_snake_case )
A_ = crop_size if crop_size is not None else {"height": 224, "width": 224}
A_ = get_size_dict(_snake_case , param_name="crop_size" )
A_ = do_resize
A_ = size
A_ = resample
A_ = do_center_crop
A_ = crop_size
A_ = do_rescale
A_ = rescale_factor
A_ = do_normalize
A_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase__ ( self : List[Any] , _snake_case : np.ndarray , _snake_case : Dict[str, int] , _snake_case : PILImageResampling = PIL.Image.BICUBIC , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : List[str] , ) -> np.ndarray:
"""simple docstring"""
A_ = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' )
return resize(
_snake_case , size=(size["height"], size["width"]) , resample=_snake_case , data_format=_snake_case , **_snake_case )
def lowerCamelCase__ ( self : str , _snake_case : np.ndarray , _snake_case : Dict[str, int] , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : str , ) -> np.ndarray:
"""simple docstring"""
A_ = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' )
return center_crop(_snake_case , size=(size["height"], size["width"]) , data_format=_snake_case , **_snake_case )
def lowerCamelCase__ ( self : List[str] , _snake_case : np.ndarray , _snake_case : Union[int, float] , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : List[Any] , ) -> int:
"""simple docstring"""
return rescale(_snake_case , scale=_snake_case , data_format=_snake_case , **_snake_case )
def lowerCamelCase__ ( self : str , _snake_case : np.ndarray , _snake_case : Union[float, List[float]] , _snake_case : Union[float, List[float]] , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : List[Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(_snake_case , mean=_snake_case , std=_snake_case , data_format=_snake_case , **_snake_case )
def lowerCamelCase__ ( self : List[Any] , _snake_case : ImageInput , _snake_case : bool = None , _snake_case : Dict[str, int] = None , _snake_case : Any=None , _snake_case : bool = None , _snake_case : Dict[str, int] = None , _snake_case : bool = None , _snake_case : float = None , _snake_case : bool = None , _snake_case : Optional[Union[float, List[float]]] = None , _snake_case : Optional[Union[float, List[float]]] = None , _snake_case : Optional[Union[str, TensorType]] = None , _snake_case : ChannelDimension = ChannelDimension.FIRST , **_snake_case : Dict , ) -> PIL.Image.Image:
"""simple docstring"""
A_ = do_resize if do_resize is not None else self.do_resize
A_ = resample if resample is not None else self.resample
A_ = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ = do_rescale if do_rescale is not None else self.do_rescale
A_ = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ = do_normalize if do_normalize is not None else self.do_normalize
A_ = image_mean if image_mean is not None else self.image_mean
A_ = image_std if image_std is not None else self.image_std
A_ = size if size is not None else self.size
A_ = get_size_dict(_snake_case )
A_ = crop_size if crop_size is not None else self.crop_size
A_ = get_size_dict(_snake_case , param_name="crop_size" )
A_ = make_list_of_images(_snake_case )
if not valid_images(_snake_case ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
A_ = [to_numpy_array(_snake_case ) for image in images]
if do_resize:
A_ = [self.resize(image=_snake_case , size=_snake_case , resample=_snake_case ) for image in images]
if do_center_crop:
A_ = [self.center_crop(image=_snake_case , size=_snake_case ) for image in images]
if do_rescale:
A_ = [self.rescale(image=_snake_case , scale=_snake_case ) for image in images]
if do_normalize:
A_ = [self.normalize(image=_snake_case , mean=_snake_case , std=_snake_case ) for image in images]
A_ = [to_channel_dimension_format(_snake_case , _snake_case ) for image in images]
A_ = {"pixel_values": images}
return BatchFeature(data=_snake_case , tensor_type=_snake_case )
| 482 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    """simple docstring"""

    def setUp(self):
        """simple docstring"""
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        """simple docstring"""
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        """simple docstring"""
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        """simple docstring"""
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        """simple docstring"""
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 482 | 1 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    # This defines a "chinese character" as anything in the CJK Unicode block:
    # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x2_0000 and cp <= 0x2_A6DF)  #
        or (cp >= 0x2_A700 and cp <= 0x2_B73F)  #
        or (cp >= 0x2_B740 and cp <= 0x2_B81F)  #
        or (cp >= 0x2_B820 and cp <= 0x2_CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2_F800 and cp <= 0x2_FA1F)  #
    ):  #
        return True
    return False
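# e.g. _is_chinese_char(ord("中")) is True (U+4E2D falls in the 0x4E00-0x9FFF block),
# while _is_chinese_char(ord("A")) is False.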
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
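# e.g. is_chinese("身高") == 1, but is_chinese("180") == 0 (digits are not CJK codepoints)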
def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_len = min(end - start, max_word_len)
            for i in range(max_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
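# Illustrative: with chinese_word_set == {"北京"},
# add_sub_symbol(["北", "京", "人"], {"北京"}) -> ["北", "##京", "人"],
# i.e. continuation characters of a whole word get the BERT "##" prefix.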
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=['''cws''']).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res['''input_ids'''])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of Chinese subwords starting with ##, which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)BERT, the best result is from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, '''r''', encoding='''utf-8''') as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, '''w''', encoding='''utf-8''') as f:
        data = [json.dumps(ref) + '''\n''' for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
    args = parser.parse_args()
main(args)
| 615 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_altclip""": [
"""ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AltCLIPConfig""",
"""AltCLIPTextConfig""",
"""AltCLIPVisionConfig""",
],
"""processing_altclip""": ["""AltCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"""ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AltCLIPPreTrainedModel""",
"""AltCLIPModel""",
"""AltCLIPTextModel""",
"""AltCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 615 | 1 |
"""simple docstring"""
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / """src"""
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}

ZERO2 = """zero2"""
ZERO3 = """zero3"""

stages = [ZERO2, ZERO3]
def parameterized_custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("""_""".join(str(x) for x in param.args))
    return f"""{func.__name__}_{param_based_name}"""


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class a ( A_ ):
'''simple docstring'''
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
def lowerCAmelCase_ ( self : Optional[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple ) -> List[Any]:
self.run_and_check(
stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , )
@require_torch_multi_gpu
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
def lowerCAmelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any] ) -> str:
self.run_and_check(
stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , )
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
def lowerCAmelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] ) -> Optional[int]:
self.run_and_check(
stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , )
@require_torch_multi_gpu
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
def lowerCAmelCase_ ( self : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : Tuple ) -> int:
self.run_and_check(
stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , )
def lowerCAmelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[Any] ) -> Any:
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def lowerCAmelCase_ ( self : List[Any] , lowerCamelCase_ : str , lowerCamelCase_ : str , lowerCamelCase_ : int = 10 , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = True , ) -> Optional[int]:
__a = models[model]
__a = self.run_trainer(
stage=lowerCamelCase_ , model_name=lowerCamelCase_ , eval_steps=lowerCamelCase_ , num_train_epochs=1 , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , )
self.do_checks(lowerCamelCase_ )
return output_dir
def lowerCAmelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str , lowerCamelCase_ : str , lowerCamelCase_ : int = 10 , lowerCamelCase_ : int = 1 , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = True , ) -> Optional[Any]:
__a = self.get_auto_remove_tmp_dir("""./xxx""" , after=lowerCamelCase_ )
__a = F"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(lowerCamelCase_ )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
""".split()
if fpaa:
args.extend(["""--fp16"""] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
__a = F"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
__a = [F"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
__a = self.get_launcher(lowerCamelCase_ )
__a = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowerCamelCase_ , env=self.get_env() )
return output_dir
def lowerCAmelCase_ ( self : Optional[int] , lowerCamelCase_ : Union[str, Any]=False ) -> Any:
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with mode gpus because we use very little data)
__a = min(2 , get_gpu_count() ) if distributed else 1
return F"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
| 714 | """simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=4_00, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 2_55, do_pad=True):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["""shortest_edge"""] * h / w)
                expected_width = self.size["""shortest_edge"""]
            elif w > h:
                expected_height = self.size["""shortest_edge"""]
                expected_width = int(self.size["""shortest_edge"""] * w / h)
            else:
                expected_height = self.size["""shortest_edge"""]
                expected_width = self.size["""shortest_edge"""]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
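    # Worked example (illustrative): with shortest_edge == 18, a 300x600 (w x h) image
    # resizes to width 18 and height int(18 * 600 / 300) == 36, preserving the aspect ratio.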
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : Optional[Any] ) -> Union[str, Any]:
__a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) )
def lowerCAmelCase_ ( self : Optional[int] ) -> List[str]:
__a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 13_33} )
self.assertEqual(image_processor.do_pad , lowerCamelCase_ )
__a = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCamelCase_ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , lowerCamelCase_ )
def lowerCAmelCase_ ( self : Tuple ) -> Tuple:
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
@slow
def lowerCAmelCase_ ( self : Optional[int] ) -> Tuple:
# prepare image and target
__a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__a = json.loads(f.read() )
__a = {"""image_id""": 3_97_69, """annotations""": target}
# encode them
__a = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""" )
__a = image_processing(images=lowerCamelCase_ , annotations=lowerCamelCase_ , return_tensors="""pt""" )
# verify pixel values
__a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , lowerCamelCase_ )
__a = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowerCamelCase_ , atol=1E-4 ) )
# verify area
__a = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowerCamelCase_ ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowerCamelCase_ )
__a = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowerCamelCase_ , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowerCamelCase_ ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowerCamelCase_ ) )
# verify class_labels
__a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowerCamelCase_ ) )
# verify orig_size
__a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowerCamelCase_ ) )
# verify size
__a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowerCamelCase_ ) )
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 173 | 0 |
def nand_gate(input_a: int, input_b: int) -> int:
    '''simple docstring'''
    return int((input_a, input_b).count(0) != 0)


def test_nand_gate() -> None:
    '''simple docstring'''
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
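# Editor's sketch: NAND is functionally complete, so the other basic gates can
# be derived from it. `not_gate` and `and_gate` are illustrative additions,
# not part of the original snippet.
def not_gate(input_a: int) -> int:
    return nand_gate(input_a, input_a)


def and_gate(input_a: int, input_b: int) -> int:
    return not_gate(nand_gate(input_a, input_b))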
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 124 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    '''simple docstring'''
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            F'''{response.status_code}, the response is:\n{response.text}'''
        )
        raise ValueError(error_message)
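# Editor's sketch: reading the webhook URL from the environment keeps the
# secret out of source control. `SLACK_WEBHOOK_URL` is a hypothetical
# variable name chosen for this example.
import os


def send_slack_message_from_env(message_body: str) -> None:
    send_slack_message(message_body, os.environ["SLACK_WEBHOOK_URL"])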
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 124 | 1 |
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
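# Example invocation (editor's addition; the script and file names below are
# placeholders -- substitute your own paths). All flags used here are defined
# by the argparse setup above:
#
#   python convert_original_stable_diffusion_to_diffusers.py \
#     --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#     --original_config_file ./v1-inference.yaml \
#     --scheduler_type ddim --extract_ema --half \
#     --dump_path ./converted-pipeline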
| 714 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True,
                 use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
                 hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 type_sequence_label_size=10, initializer_range=0.02, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)
        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values)
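# Editor's note: position-embedding interpolation is what makes the 480x480
# DINO test above work -- a patch size of 8 on a 480x480 input gives a
# 60x60 = 3600 patch grid plus the [CLS] token, matching the asserted
# (1, 3601, 384) hidden-state shape.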
| 67 | 0 |
from typing import Any
def mode(input_list: list) -> list[Any]:
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
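# Quick self-checks (editor's addition; expected outputs inferred from the
# implementation above):
assert mode([2, 2, 3]) == [2]
assert mode([1, 1, 2, 2]) == [1, 2]  # every value tied for the top count, sorted
assert mode([]) == []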
if __name__ == "__main__":
import doctest
doctest.testmod()
| 380 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 661 | 0 |
def neville_interpolate(x_points: list, y_points: list, xa: float) -> list:
    '''simple docstring'''
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
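# Worked example (editor's addition): the points below lie on the line
# y = x + 5, so Neville interpolation at x = 5 must recover 10.0 exactly.
assert neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0] == 10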
if __name__ == "__main__":
import doctest
doctest.testmod()
| 46 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    """simple docstring"""

    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3],
        layer_type="preactivation", hidden_act="relu", global_padding=None, num_groups=32, drop_path_rate=0.0,
        embedding_dynamic_padding=False, output_stride=32, width_factor=1, out_features=None, out_indices=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"""layer_type={layer_type} is not one of {','.join(self.layer_types)}""")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"""Padding strategy {global_padding} not supported""")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"""stage{idx}""" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
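# Minimal usage sketch (editor's addition): instantiate the config and inspect
# the stage names derived from `depths`. The argument values are illustrative.
if __name__ == "__main__":
    config = BitConfig(layer_type="bottleneck", global_padding="same")
    print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']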
| 46 | 1 |
"""simple docstring"""
from timeit import timeit
test_data = {
"""MALAYALAM""": True,
"""String""": False,
"""rotor""": True,
"""level""": True,
"""A""": True,
"""BB""": True,
"""ABC""": False,
"""amanaplanacanalpanama""": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    """simple docstring"""
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    """simple docstring"""
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    """simple docstring"""
    if len(s) <= 1:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    """simple docstring"""
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    """simple docstring"""
    stmt = F'''all({name}(key) is value for key, value in test_data.items())'''
    setup = F'''from __main__ import test_data, {name}'''
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(F'''{name:<35} finished {number:,} runs in {result:.5f} seconds''')
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F"""{key:21} {value}""")
print("""a man a plan a canal panama""")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("""is_palindrome_slice""")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("""is_palindrome""")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("""is_palindrome_recursive""")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("""is_palindrome_traversal""")
| 19 |
"""simple docstring"""
# Imports
import numpy as np
class IndexCalculation:
    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        '''simple docstring'''
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        '''simple docstring'''
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        '''simple docstring'''
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''')
return False
    def arvaa(self):
'''simple docstring'''
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
    def ccci(self):
'''simple docstring'''
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
    def cvi(self):
'''simple docstring'''
return self.nir * (self.red / (self.green**2))
    def gli(self):
'''simple docstring'''
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
    def ndvi(self):
'''simple docstring'''
return (self.nir - self.red) / (self.nir + self.red)
    def bndvi(self):
'''simple docstring'''
return (self.nir - self.blue) / (self.nir + self.blue)
    def red_edge_ndvi(self):
'''simple docstring'''
return (self.redEdge - self.red) / (self.redEdge + self.red)
    def gndvi(self):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green)
    def gbndvi(self):
'''simple docstring'''
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
    def grndvi(self):
'''simple docstring'''
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
    def rbndvi(self):
'''simple docstring'''
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
    def pndvi(self):
'''simple docstring'''
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
    def atsavi(self, x=0.08, a=1.22, b=0.03):
'''simple docstring'''
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
    def bwdrvi(self):
'''simple docstring'''
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
    def ci_green(self):
'''simple docstring'''
return (self.nir / self.green) - 1
    def ci_rededge(self):
'''simple docstring'''
return (self.nir / self.redEdge) - 1
    def ci(self):
'''simple docstring'''
return (self.red - self.blue) / self.red
    def ctvi(self):
        '''simple docstring'''
        ndvi = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
    def gdvi(self):
'''simple docstring'''
return self.nir - self.green
    def evi(self):
'''simple docstring'''
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
    def gemi(self):
        '''simple docstring'''
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
    def gosavi(self, y=0.16):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green + y)
    def gsavi(self, n=0.5):
'''simple docstring'''
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
    def hue(self):
'''simple docstring'''
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))
    def ivi(self, a=None, b=None):
'''simple docstring'''
return (self.nir - b) / (a * self.red)
    def ipvi(self):
'''simple docstring'''
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
    def i(self):
'''simple docstring'''
return (self.red + self.green + self.blue) / 30.5
    def rvi(self):
'''simple docstring'''
return self.nir / self.red
    def mrvi(self):
'''simple docstring'''
return (self.rvi() - 1) / (self.rvi() + 1)
    def m_savi(self):
'''simple docstring'''
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
    def norm_g(self):
'''simple docstring'''
return self.green / (self.nir + self.red + self.green)
    def norm_nir(self):
'''simple docstring'''
return self.nir / (self.nir + self.red + self.green)
    def norm_r(self):
'''simple docstring'''
return self.red / (self.nir + self.red + self.green)
    def ngrdi(self):
'''simple docstring'''
return (self.green - self.red) / (self.green + self.red)
    def ri(self):
'''simple docstring'''
return (self.red - self.green) / (self.red + self.green)
    def s(self):
        '''simple docstring'''
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
return (max_value - min_value) / max_value
    def _if(self):
'''simple docstring'''
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
    def dvi(self):
'''simple docstring'''
return self.nir / self.red
    def tvi(self):
'''simple docstring'''
return (self.ndvi() + 0.5) ** (1 / 2)
    def ndre(self):
'''simple docstring'''
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 19 | 1 |
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError(RuntimeError):
    '''simple docstring'''
    pass
def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}
def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])
    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {"shards": [F'shard_{shard_idx}' for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))
    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))
    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(F'local_size {local_size} != expected_local_size {expected_local_size}')
if __name__ == "__main__":
main()
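# Example launch (editor's sketch; the file name is a placeholder):
#   torchrun --nproc_per_node=2 run_split_dataset_by_node.py --streaming True
# Each of the 2 ranks should then see 6 of the 4*3=12 generated examples.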
| 533 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    '''simple docstring'''

    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        """simple docstring"""
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        """simple docstring"""
        requires_backends(cls, ["note_seq"])
| 533 | 1 |
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self, depths: List[int] = [3, 2, 6, 4], hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True], dim: int = 448, key_dim: int = 32,
        attention_ratio: int = 4, resolution: int = 7, num_hidden_layers: int = 5, num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4, hidden_dropout_prob: float = 0.0, patch_size: int = 16, num_channels: int = 3,
        pool_size: int = 3, downsample_patch_size: int = 3, downsample_stride: int = 2, downsample_pad: int = 1,
        drop_path_rate: float = 0.0, num_meta3d_blocks: int = 1, distillation: bool = True,
        use_layer_scale: bool = True, layer_scale_init_value: float = 1e-5, hidden_act: str = "gelu",
        initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, image_size: int = 224,
        batch_norm_eps: float = 1e-05, **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
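# Minimal usage sketch (editor's addition): the defaults above describe the
# published L1 variant; any field can be overridden by keyword.
if __name__ == "__main__":
    config = EfficientFormerConfig(image_size=192)
    print(config.model_type, config.hidden_sizes, config.depths)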
| 29 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    '''simple docstring'''
    if not isinstance(n, int):
        raise ValueError("""n must be an integer""")
    if n <= 0:
        raise ValueError("""n must be >= 0""")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []
def solution() -> int:
    '''simple docstring'''
    return compute_nums(1)[0]
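# Editor's note: solution() returns the first odd composite that has no
# `prime + 2*i**2` decomposition; the published answer to Project Euler
# problem 46 is 5777, so `solution() == 5777` is the expected result.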
if __name__ == "__main__":
    print(F'{solution() = }')

| 334 | 0 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    '''simple docstring'''
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"""can't find {path}""")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    """simple docstring"""

    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(UpperCamelCase , "argv" , UpperCamelCase):
lowerCamelCase__ = time()
xla_spawn.main()
lowerCamelCase__ = time()
lowerCamelCase__ = get_results(UpperCamelCase)
self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5)
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_00)
    def test_trainer_tpu(self):
        import xla_spawn

        testargs = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
        with patch.object(sys, "argv", testargs):
xla_spawn.main()
| 708 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCAmelCase_ = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 426 | 0 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    '''simple docstring'''
    raise RuntimeError('CUDA out of memory.')


class ModelForTest(nn.Module):
    def __init__(self):
        """simple docstring"""
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        """simple docstring"""
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        """simple docstring"""
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
    def test_memory_explicit(self):
        """simple docstring"""
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function('hello')
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, 'hello'])
    def test_start_zero(self):
        """simple docstring"""

        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn('No executable batch size found, reached zero.', cm.exception.args[0])
    def test_approach_zero(self):
        """simple docstring"""

        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn('No executable batch size found, reached zero.', cm.exception.args[0])
    def test_verbose_guard(self):
        """simple docstring"""

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, 'hello', 'world')
        self.assertIn('Batch size was passed into `f`', cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])
    def test_any_other_error(self):
        """simple docstring"""

        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError('Oops, we had an error!')

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn('Oops, we had an error!', cm.exception.args[0])
    @require_cuda
    def test_release_memory(self):
        """simple docstring"""
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
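# Typical training-loop usage sketch (editor's addition; `build_dataloader`
# and `train` are hypothetical placeholders):
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def inner_training_loop(batch_size):
#       loader = build_dataloader(batch_size)
#       train(model, loader)
#
#   inner_training_loop()  # batch size is injected, then halved on CUDA OOM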
| 22 |
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    """simple docstring"""
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text) for title, rating in zip(titles, ratings)
    }
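# Editor's sketch: the scraped dict preserves page order; to rank by rating
# instead, sort the items explicitly.
def top_rated_first(movies: dict[str, float]) -> list[tuple[str, float]]:
    return sorted(movies.items(), key=lambda item: item[1], reverse=True)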
def lowercase__(A = "IMDb_Top_250_Movies.csv" ) ->None:
"""simple docstring"""
lowercase__ : str= get_imdb_top_aaa_movies()
with open(A , "w" , newline="" ) as out_file:
lowercase__ : int= csv.writer(A )
writer.writerow(["Movie title", "IMDb rating"] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 218 | 0 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    """simple docstring"""
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """simple docstring"""
        download_parser = parser.add_parser('download')
        download_parser.add_argument(
            '--cache-dir', type=str, default=None, help='Path to location to store the models')
        download_parser.add_argument(
            '--force', action='store_true', help='Force the model to be download even if already in cache-dir')
        download_parser.add_argument(
            '--trust-remote-code', action='store_true', help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine")
        download_parser.add_argument('model', type=str, help='Name of the model to download')
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        """simple docstring"""
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        """simple docstring"""
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
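# Example invocation (editor's addition), via the `transformers-cli` entry
# point that registers this subcommand:
#   transformers-cli download bert-base-uncased --cache-dir ./models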
| 169 |
def knapsack(values: list, weights: list, number_of_items: int, max_weight: int, index: int) -> int:
    """simple docstring"""
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(values, weights, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            values, weights, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
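# Worked example (editor's addition): values [3, 2, 4] with weights [4, 3, 2]
# and capacity 6 -- the best pick is items 0 and 2 (total weight 6, value 7).
assert knapsack([3, 2, 4], [4, 3, 2], 3, 6, 0) == 7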
if __name__ == "__main__":
import doctest
doctest.testmod()
| 169 | 1 |
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin'''
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f'''Saving model to {output_model_file}''')
                torch.save(state_dict, output_model_file)
                logger.info(f'''Model saved to {output_model_file}''')
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
                if model_index == 0
                else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f'''Saving model to {output_model_file}''')
            torch.save(state_dict, output_model_file)
            logger.info(f'''Model saved to {output_model_file}''')
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f'''{MODEL_NAME}_{model_index}''')
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f'''Saving model to {ckpt_dir}''')
            state_dict = {'model': state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict, storage_writer=dist_cp.FileSystemWriter(ckpt_dir), planner=DefaultSavePlanner(), )
            logger.info(f'''Model saved to {ckpt_dir}''')
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        'Set the `sync_module_states` flag to `True` so that model states are synced across processes when '
                        'initializing FSDP object')
                return
            weights_name = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin'''
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f'''Loading model from {input_model_file}''')
            state_dict = torch.load(input_model_file)
            logger.info(f'''Model loaded from {input_model_file}''')
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
                if model_index == 0
                else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f'''Loading model from {input_model_file}''')
            state_dict = torch.load(input_model_file)
            logger.info(f'''Model loaded from {input_model_file}''')
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f'''{MODEL_NAME}_{model_index}''')
                if f'''{MODEL_NAME}''' not in input_dir
                else input_dir
            )
            logger.info(f'''Loading model from {ckpt_dir}''')
            state_dict = {'model': model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict, storage_reader=dist_cp.FileSystemReader(ckpt_dir), planner=DefaultLoadPlanner(), )
            state_dict = state_dict['model']
            logger.info(f'''Model loaded from {ckpt_dir}''')
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # The check below should work but is currently disabled (mostly a PyTorch issue);
            # in the meantime every rank loads, at the cost of excess memory usage.
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
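# A minimal usage sketch (the directory name and the training objects are
# illustrative, not part of this module): with an `Accelerator` launched under
# an FSDP config, the helpers above checkpoint and restore the wrapped
# model/optimizer pair.
#
#     from accelerate import Accelerator
#
#     accelerator = Accelerator()  # launched with an FSDP config
#     model, optimizer = accelerator.prepare(model, optimizer)
#     fsdp_plugin = accelerator.state.fsdp_plugin
#
#     save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#     save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")
#     # ... later, to resume ...
#     load_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#     load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")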
| 394 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
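# What the lazy structure buys us, in a short sketch (assumed usage): importing
# the package stays cheap because `_LazyModule` is installed in `sys.modules`,
# and the torch-backed symbols declared above are only materialized on first
# attribute access.
#
#     from transformers.models.falcon import FalconConfig  # config only, no modeling import
#     config = FalconConfig(num_hidden_layers=2)
#     # `FalconModel` (and its torch machinery) is only imported at this point:
#     # from transformers.models.falcon import FalconModel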
| 443 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 705 |
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # Hard-concrete stretching: map sigmoid(scores) to [l, r] and clamp to [0, 1]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
    args = parser.parse_args()
main(args)
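# Example invocation (paths are illustrative, assuming the script is saved as
# bertarize.py as in the movement-pruning examples): binarize a soft-movement
# pruned checkpoint and write the result next to the input folder as
# `bertarized_fine_pruned_model/pytorch_model.bin`.
#
#     python bertarize.py \
#         --pruning_method sigmoied_threshold \
#         --threshold 0.1 \
#         --model_name_or_path ./serialization_dir/fine_pruned_model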
| 4 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    """Walk the attribute path in `key` and copy `value` into the matching HF parameter."""
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Map every fairseq tensor onto the HF model via MAPPING; collect unmatched names."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy a fairseq convolutional feature-extractor tensor onto the HF feature extractor."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak the fairseq UniSpeech weights into the transformers design."""
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
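# Example invocation (paths are illustrative, assuming a fairseq UniSpeech
# checkpoint and its phoneme dictionary on disk):
#
#     python convert_unispeech_checkpoint.py \
#         --checkpoint_path ./unispeech_large.pt \
#         --dict_path ./dict.ltr.txt \
#         --pytorch_dump_folder_path ./unispeech-large-hf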
| 87 |
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
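# A short illustration of the default-alignment behaviour exercised above
# (import path as used at the top of this file):
#
#     from transformers.utils.backbone_utils import get_aligned_output_features_output_indices
#
#     out_features, out_indices = get_aligned_output_features_output_indices(
#         None, None, ["stem", "stage1", "stage2"]
#     )
#     print(out_features, out_indices)  # ['stage2'] [2] -- both default to the last stage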
| 489 | 0 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
a_ = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
a_ = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
a_ = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 621 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 621 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'junnyu/roformer_chinese_small': 1536,
'junnyu/roformer_chinese_base': 1536,
'junnyu/roformer_chinese_char_small': 512,
'junnyu/roformer_chinese_char_base': 512,
'junnyu/roformer_small_discriminator': 128,
'junnyu/roformer_small_generator': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        # Make sure we correctly set the custom Jieba PreTokenizer
        vocab = self.backend_tokenizer.get_vocab()
        self.backend_tokenizer.pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        # The custom PreTokenizer cannot be pickled; swap in a standard one for serialization
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        # Serialize with a standard pre-tokenizer, since the Jieba one is a custom Python object
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
| 106 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"gpt2": 1024,
"gpt2-medium": 1024,
"gpt2-large": 1024,
"gpt2-xl": 1024,
"distilgpt2": 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
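# A small usage sketch (model id is illustrative) of the `add_prefix_space`
# invariant enforced by `_batch_encode_plus`/`_encode_plus` above: pretokenized
# input is only accepted when the tokenizer was built with add_prefix_space=True.
#
#     from transformers import GPT2TokenizerFast
#
#     tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
#     ids = tok(["Hello", "world"], is_split_into_words=True)["input_ids"]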
| 510 | 0 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """
    Custom SentencePiece-style Unigram tokenizer: NMT/NFKC normalization,
    whitespace collapsing, lowercasing, digit and punctuation splitting, and
    Metaspace pre-tokenization.
    """

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given files"""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator"""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        # The Unigram model needs to know which id stands for the unknown token
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
| 137 |
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Calculate the great-circle distance in metres between two points on Earth,
    using the haversine formula on latitudes corrected for the WGS-84 flattening:
    https://en.wikipedia.org/wiki/Haversine_formula
    """
    # Reduce the latitudes onto the auxiliary sphere using the ellipsoid flattening
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Haversine equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
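# Example (coordinates are illustrative): great-circle distance in metres
# between downtown San Francisco and Manhattan, roughly 4.1e6 m.
#
#     print(haversine_distance(37.774856, -122.424227, 40.713019, -74.012647))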
| 137 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=1,
            new_decoder_architecture=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)

    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_cache_conversion(self):
        # Falcon stores its KV cache in an "RW" layout internally; check the round-trip
        # to and from the standard (batch, heads, seq, head_dim) format is lossless.
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(rw_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])
                )

    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_past_key_values_format(self):
        # Falcon can have different numbers of KV-heads than the number of query heads,
        # so this overrides the common test to use the right head counts.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return

            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)

            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return

            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads

            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)

            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )

        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]

        self.assertEqual(output_str, EXPECTED_OUTPUT)

    @slow
    def test_lm_generation_big_models(self):
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
"""simple docstring"""
def split(string: str, separator: str = " ") -> list:
    """
    Split the string on the given separator (defaults to a space) and return
    the list of substrings.

    >>> split("apple#banana#cherry#orange", separator="#")
    ['apple', 'banana', 'cherry', 'orange']
    >>> split("Hello there")
    ['Hello', 'there']
    """
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
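
    # Illustrative extra check (not from the original file): a non-default separator.
    print(split("12:43:39", separator=":"))  # -> ['12', '43', '39']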
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
from __future__ import annotations

import random

# Maximum size of the population.  Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluate how similar the item is to the target by counting each char in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select the second parent and generate new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until the target string is found."""
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population: {total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
from __future__ import annotations

import json

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """
    May raise json.decoder.JSONDecodeError
    """
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    """
    Class to crawl public Instagram user information.
    """

    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """
        Return a dict of user information
        """
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """
    Shift input ids one token to the right.
    """
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
return shifted_input_ids
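
# Quick illustration with hypothetical values (not from the original module):
# with pad_token_id=0 and decoder_start_token_id=0,
#   shift_tokens_right(jnp.array([[5, 6, 7]]), 0, 0)
# yields [[0, 5, 6]]: the decoder input starts with the start token and the
# labels are moved one position to the right; -100 sentinel labels become pads.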


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Prim's (also known as Jarník's) algorithm for minimum spanning trees."""

import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    """Class Vertex."""

    def __init__(self, id_):
        """
        Arguments:
            id_ - an id to identify the vertex
        Attributes:
            neighbors - a list of the vertices it is linked to
            edges - a dict storing edge weights, keyed by neighbor vertex id
        """
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        """Comparison rule to < operator."""
        return self.key < other.key

    def __repr__(self):
        """Return the vertex id."""
        return self.id

    def add_neighbor(self, vertex):
        """Add a pointer to a vertex at neighbor's list."""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        """Destination vertex and weight."""
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    """Connect vertices a and b (1-indexed) with an edge of the given weight."""
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a plain list; returns the MST as (vertex, parent) pairs."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm with a binary heap; yields the MST as (vertex, parent) pairs."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def test_vector() -> None:
    """Exercise both Prim variants on a small graph (see the usage sketch below)."""
if __name__ == "__main__":
import doctest
doctest.testmod()
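
    # Minimal usage sketch (hypothetical graph, not the original doctest):
    # build a small undirected graph and compare both Prim variants.
    graph = [Vertex(n) for n in range(5)]
    connect(graph, 1, 2, 15)
    connect(graph, 1, 3, 12)
    connect(graph, 2, 4, 13)
    connect(graph, 2, 5, 5)
    connect(graph, 3, 5, 9)
    connect(graph, 5, 4, 8)
    print(prim(graph, graph[0]))
    print(list(prim_heap(graph, graph[0])))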
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image.Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
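

# Sketch of typical use (assumed, not part of this module): PipelineTool
# instances are called like functions, and the call chains
# encode -> forward -> decode under the hood.
#
#   from PIL import Image
#   tool = ImageQuestionAnsweringTool()
#   answer = tool(Image.open("photo.jpg"), "What is in the picture?")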
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self):
        self.head: Node | None = None
        self.tail: Node | None = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
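

# For example (illustrative key, not from a real checkpoint):
#   "backbone.block1.0.attn.q.weight"
#   -> "segformer.encoder.block.0.0.attention.self.query.weight"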
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")
    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
            [
                [[-1.1372e01, -1.2787e01, -1.3477e01], [-1.2536e01, -1.4194e01, -1.4409e01], [-1.3217e01, -1.4888e01, -1.5327e01]],
                [[-1.4791e01, -1.7122e01, -1.8277e01], [-1.7163e01, -1.9192e01, -1.9533e01], [-1.7897e01, -1.9991e01, -2.0315e01]],
                [[7.6723e-01, 4.1921e-01, -7.7878e-02], [4.7772e-01, 9.5557e-03, -2.8082e-01], [3.6032e-01, -2.4826e-01, -5.1168e-01]],
            ]
        )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])

    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan

# WGS84 ellipsoid constants, https://en.wikipedia.org/wiki/World_Geodetic_System
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Calculate the great-circle distance between two points on a sphere,
    given their latitudes and longitudes in degrees; the result is in metres.
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
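
    # Rough sanity check with illustrative coordinates (San Francisco and
    # Yosemite); the result is printed in kilometres.
    SAN_FRANCISCO = (37.774856, -122.424227)
    YOSEMITE = (37.864742, -119.537521)
    print(f"{haversine_distance(*SAN_FRANCISCO, *YOSEMITE) / 1000:.1f} km")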
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
def xnor_gate(input_1: int, input_2: int) -> int:
    """Calculate XNOR of two inputs: 1 if both inputs are equal, else 0."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)
return hidden_states
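# Shape sketch (illustrative, not part of the module): with a default
# transformers CLIPVisionConfig (hidden_size 768), the pooled CLIP output
# (batch, 768) is unsqueezed to (batch, 1, 768), passed through the mapper and
# LayerNorm unchanged in shape, then projected to proj_size. Running it
# requires the relative diffusers imports above to resolve:
#
#     from transformers import CLIPVisionConfig
#     config = CLIPVisionConfig()
#     encoder = PaintByExampleImageEncoder(config, proj_size=768)
#     pixel_values = torch.randn(1, 3, config.image_size, config.image_size)
#     cond, uncond = encoder(pixel_values, return_uncond_vector=True)
#     cond.shape    # torch.Size([1, 1, 768])
#     uncond.shape  # torch.Size([1, 1, 768])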
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32-like nested list of the given shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
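# Example (shape check only; the seed below is an illustrative choice):
#     batch = floats_list((2, 3), rng=random.Random(0))
#     len(batch), len(batch[0])  ->  (2, 3)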
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(speech_inputs_truncated, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
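# Why the integration test expects (1, 80, 3000): the real Whisper extractor
# pads/trims audio to a fixed 30 s window, so the frame count is
# chunk_length * sampling_rate / hop_length = 30 * 16000 / 160 = 3000,
# over feature_size = 80 mel bins. A rough check against the real defaults:
if __name__ == "__main__":
    fe = WhisperFeatureExtractor()
    print(fe.feature_size, fe.nb_max_frames)  # expected: 80 3000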
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
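# Illustrative only: PyTorch's MultiheadAttention stores q/k/v as one stacked
# (3*d, d) in_proj_weight; this model's hidden size is d = 256, hence the
# 256-row slices above.
#
#     d = 256
#     in_proj_weight = torch.randn(3 * d, d)
#     q_w, k_w, v_w = in_proj_weight[:d], in_proj_weight[d : 2 * d], in_proj_weight[-d:]
#     q_w.shape == k_w.shape == v_w.shape == (d, d)  ->  True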
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
snake_case__ = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
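# Example invocation (the script filename and output path are illustrative):
#
#     python convert_conditional_detr_checkpoint.py \
#         --model_name conditional_detr_resnet50 \
#         --pytorch_dump_folder_path ./conditional_detr_resnet50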
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase : int = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowercase )
pipe.set_progress_bar_config(disable=lowercase )
UpperCAmelCase : Tuple = self.get_dummy_inputs()
UpperCAmelCase : str = pipe(**lowercase ).images
UpperCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
UpperCAmelCase : Optional[int] = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase : List[str] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowercase )
UpperCAmelCase : str = self.get_dummy_inputs()
UpperCAmelCase : Union[str, Any] = pipe(**lowercase ).images
UpperCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
UpperCAmelCase : List[Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase : Tuple = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowercase )
UpperCAmelCase : Any = self.get_dummy_inputs()
UpperCAmelCase : List[Any] = pipe(**lowercase ).images
UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
UpperCAmelCase : Union[str, Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase : List[str] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowercase )
UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs()
UpperCAmelCase : List[Any] = pipe(**lowercase ).images
UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
UpperCAmelCase : Dict = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowercase )
UpperCAmelCase : str = self.get_dummy_inputs()
UpperCAmelCase : Tuple = pipe(**lowercase ).images
UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
UpperCAmelCase : int = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=lowercase )
UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs()
UpperCAmelCase : Union[str, Any] = 3 * [inputs["prompt"]]
# forward
UpperCAmelCase : Tuple = pipe(**lowercase )
UpperCAmelCase : Optional[int] = output.images[0, -3:, -3:, -1]
UpperCAmelCase : str = self.get_dummy_inputs()
UpperCAmelCase : Any = 3 * [inputs.pop("prompt" )]
UpperCAmelCase : Any = pipe.tokenizer(
lowercase , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=lowercase , return_tensors="np" , )
UpperCAmelCase : Any = text_inputs["input_ids"]
UpperCAmelCase : List[str] = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
UpperCAmelCase : List[str] = prompt_embeds
# forward
UpperCAmelCase : Tuple = pipe(**lowercase )
UpperCAmelCase : List[Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=lowercase )
UpperCAmelCase : str = self.get_dummy_inputs()
UpperCAmelCase : Tuple = 3 * ["this is a negative prompt"]
UpperCAmelCase : Optional[int] = negative_prompt
UpperCAmelCase : List[str] = 3 * [inputs["prompt"]]
# forward
UpperCAmelCase : int = pipe(**lowercase )
UpperCAmelCase : Tuple = output.images[0, -3:, -3:, -1]
UpperCAmelCase : str = self.get_dummy_inputs()
UpperCAmelCase : Any = 3 * [inputs.pop("prompt" )]
UpperCAmelCase : int = []
for p in [prompt, negative_prompt]:
UpperCAmelCase : str = pipe.tokenizer(
lowercase , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=lowercase , return_tensors="np" , )
UpperCAmelCase : Optional[int] = text_inputs["input_ids"]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
UpperCAmelCase , UpperCAmelCase : List[str] = embeds
# forward
UpperCAmelCase : Tuple = pipe(**lowercase )
UpperCAmelCase : Any = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=lowercase , feature_extractor=lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowercase )
UpperCAmelCase : Tuple = "A painting of a squirrel eating a burger"
np.random.seed(0 )
UpperCAmelCase : Optional[int] = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="np" )
UpperCAmelCase : Tuple = output.images
UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase : int = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase : Any = DDIMScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
UpperCAmelCase : Dict = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=lowercase , safety_checker=lowercase , feature_extractor=lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowercase )
UpperCAmelCase : Any = "open neural network exchange"
UpperCAmelCase : Dict = np.random.RandomState(0 )
UpperCAmelCase : Optional[Any] = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase , output_type="np" )
UpperCAmelCase : Optional[Any] = output.images
UpperCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase : Tuple = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
UpperCAmelCase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=lowercase , safety_checker=lowercase , feature_extractor=lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowercase )
UpperCAmelCase : List[str] = "open neural network exchange"
UpperCAmelCase : Tuple = np.random.RandomState(0 )
UpperCAmelCase : List[str] = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase , output_type="np" )
UpperCAmelCase : Any = output.images
UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase : List[str] = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase : Tuple = 0
def test_callback_fn(lowercase : int , lowercase : int , lowercase : np.ndarray ) -> None:
UpperCAmelCase : Any = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
UpperCAmelCase : int = latents[0, -3:, -3:, -1]
UpperCAmelCase : Dict = np.array(
[-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
UpperCAmelCase : Any = latents[0, -3:, -3:, -1]
UpperCAmelCase : Dict = np.array(
[-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
UpperCAmelCase : Union[str, Any] = False
UpperCAmelCase : Any = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=lowercase , feature_extractor=lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase )
UpperCAmelCase : int = "Andromeda galaxy in a bottle"
UpperCAmelCase : Tuple = np.random.RandomState(0 )
pipe(
prompt=lowercase , num_inference_steps=5 , guidance_scale=7.5 , generator=lowercase , callback=lowercase , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=lowercase , feature_extractor=lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(lowercase , lowercase )
assert pipe.safety_checker is None
UpperCAmelCase : List[str] = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowercase )
UpperCAmelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(lowercase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCAmelCase : Any = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
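# Minimal CPU sketch mirroring the fast tests above (tiny test checkpoint,
# 2 denoising steps; assumes diffusers with ONNX support plus onnxruntime):
if __name__ == "__main__":
    pipe = OnnxStableDiffusionPipeline.from_pretrained(
        "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline", provider="CPUExecutionProvider"
    )
    image = pipe(
        "A painting of a squirrel eating a burger",
        generator=np.random.RandomState(0),
        num_inference_steps=2,
        output_type="numpy",
    ).images[0]
    print(image.shape)  # expected: (128, 128, 3)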
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Tuple = logging.get_logger(__name__)
snake_case_ : Union[str, Any] = {
"""naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class snake_case__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = '''donut-swin'''
SCREAMING_SNAKE_CASE__ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : str , lowercase : str=2_24 , lowercase : Optional[Any]=4 , lowercase : Optional[int]=3 , lowercase : Dict=96 , lowercase : Optional[int]=[2, 2, 6, 2] , lowercase : Optional[Any]=[3, 6, 12, 24] , lowercase : Tuple=7 , lowercase : Dict=4.0 , lowercase : Tuple=True , lowercase : Dict=0.0 , lowercase : int=0.0 , lowercase : int=0.1 , lowercase : List[Any]="gelu" , lowercase : Optional[Any]=False , lowercase : Tuple=0.0_2 , lowercase : int=1E-5 , **lowercase : str , ):
'''simple docstring'''
super().__init__(**lowercase )
UpperCAmelCase : Any = image_size
UpperCAmelCase : Any = patch_size
UpperCAmelCase : Optional[Any] = num_channels
UpperCAmelCase : Union[str, Any] = embed_dim
UpperCAmelCase : str = depths
UpperCAmelCase : Dict = len(lowercase )
UpperCAmelCase : Dict = num_heads
UpperCAmelCase : Union[str, Any] = window_size
UpperCAmelCase : Any = mlp_ratio
UpperCAmelCase : Optional[int] = qkv_bias
UpperCAmelCase : List[Any] = hidden_dropout_prob
UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase : Union[str, Any] = drop_path_rate
UpperCAmelCase : str = hidden_act
UpperCAmelCase : Any = use_absolute_embeddings
UpperCAmelCase : Optional[Any] = layer_norm_eps
UpperCAmelCase : List[Any] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase : Dict = int(embed_dim * 2 ** (len(lowercase ) - 1) )
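# Sanity check of the derived attribute: with the defaults above,
# hidden_size = embed_dim * 2 ** (num_stages - 1) = 96 * 2 ** 3 = 768.
if __name__ == "__main__":
    config = DonutSwinConfig()
    print(config.hidden_size)  # 768
    print(config.num_layers)   # 4 stages, from depths [2, 2, 6, 2]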
from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image using the usual -255..255 level convention."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        """Map each pixel value away from (or toward) the midpoint 128."""
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
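# Worked example of the contrast factor used above: at level 170,
#     factor = 259 * (170 + 255) / (255 * (259 - 170)) = 110075 / 22695 ≈ 4.85,
# so pixel values are pushed strongly away from the midpoint 128.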
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
a : Any = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams

        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."

        # We test on dev set to compare to benchmarks without having to submit to GLUE server
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )
    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )

        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )

        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )

        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
@slow
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
a__ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
a__ = tokenizer.encode('你好' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a__ = tokenizer.encode('你是谁' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a__ = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
a__ = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
a__ = self.get_tokenizers(do_lower_case=__SCREAMING_SNAKE_CASE )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
a__ = '你好,你是谁'
a__ = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
a__ = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
a__ = tokenizer.convert_tokens_to_shape_ids(__SCREAMING_SNAKE_CASE )
a__ = tokenizer.convert_tokens_to_pronunciation_ids(__SCREAMING_SNAKE_CASE )
a__ = tokenizer.prepare_for_model(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
a__ = tokenizer.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
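# Offset-mapping sketch (my addition; requires a "fast" Rust-backed tokenizer
# and network access to fetch a checkpoint, so it is left as a comment):
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("bert-base-uncased", use_fast=True)
#   enc = tok("A naive sentence.", return_offsets_mapping=True)
#   # enc["offset_mapping"][i] is the (start, end) character span of token i;
#   # special tokens such as [CLS] and [SEP] map to the empty span (0, 0).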
| 273 |
"""simple docstring"""
import math
class lowercase:
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
a__ = 0.0
a__ = 0.0
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
da += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > da else 1
return 0
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> list[list[int | float]]:
"""simple docstring"""
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def __magic_name__ ( ) -> None:
# Training Examples ( m, n )
a__ = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
a__ = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
a__ = SelfOrganizingMap()
a__ = 3
a__ = 0.5
for _ in range(UpperCamelCase ):
for j in range(len(UpperCamelCase ) ):
# training sample
a__ = training_samples[j]
# Compute the winning vector
a__ = self_organizing_map.get_winner(UpperCamelCase , UpperCamelCase )
# Update the winning vector
a__ = self_organizing_map.update(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
# classify test sample
a__ = [0, 0, 0, 1]
a__ = self_organizing_map.get_winner(UpperCamelCase , UpperCamelCase )
# results
print(f'Clusters that the test sample belongs to : {winner}' )
print(f'Weights that have been trained : {weights}' )
# running the main() function
if __name__ == "__main__":
main()
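# Worked example of the update rule above (hypothetical numbers, shown only to
# illustrate w_i += alpha * (x_i - w_i)): with w = [0.2, 0.6], x = [1, 0] and
# alpha = 0.5, the winner moves halfway towards the sample:
#   w[0] = 0.2 + 0.5 * (1 - 0.2) = 0.6
#   w[1] = 0.6 + 0.5 * (0 - 0.6) = 0.3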
| 273 | 1 |
"""Lazy import structure for the LongT5 model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available

_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
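# Lazy-loading sketch (my addition): `_LazyModule` replaces this module in
# `sys.modules`, so a submodule is only imported when one of the names listed
# in `_import_structure` is first accessed, e.g.
#
#   from transformers.models.longt5 import LongT5Config  # imports configuration_longt5 only
#   config = LongT5Config()                               # torch/flax code still untouched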
| 581 |
'''simple docstring'''
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class a__( snake_case__ , unittest.TestCase ):
a_ : str = PriorTransformer
a_ : Tuple = '''hidden_states'''
@property
def _lowercase ( self ) -> int:
snake_case__ =4
snake_case__ =8
snake_case__ =7
snake_case__ =floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase )
snake_case__ =floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase )
snake_case__ =floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _lowercase ( self , _UpperCAmelCase=0 ) -> List[str]:
torch.manual_seed(_UpperCAmelCase )
snake_case__ =4
snake_case__ =8
snake_case__ =7
snake_case__ =torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
snake_case__ =torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
snake_case__ =torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def _lowercase ( self ) -> str:
return (4, 8)
@property
def _lowercase ( self ) -> Any:
return (4, 8)
def _lowercase ( self ) -> Dict:
snake_case__ ={
'num_attention_heads': 2,
'attention_head_dim': 4,
'num_layers': 2,
'embedding_dim': 8,
'num_embeddings': 7,
'additional_embeddings': 4,
}
snake_case__ =self.dummy_input
return init_dict, inputs_dict
def _lowercase ( self ) -> List[Any]:
snake_case__ , snake_case__ =PriorTransformer.from_pretrained(
'hf-internal-testing/prior-dummy' , output_loading_info=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(_UpperCAmelCase )
snake_case__ =model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def _lowercase ( self ) -> Optional[Any]:
snake_case__ , snake_case__ =self.prepare_init_args_and_inputs_for_common()
snake_case__ =self.model_class(**_UpperCAmelCase )
snake_case__ =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ =[*signature.parameters.keys()]
snake_case__ =['hidden_states', 'timestep']
self.assertListEqual(arg_names[:2] , _UpperCAmelCase )
def _lowercase ( self ) -> str:
snake_case__ =PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' )
snake_case__ =model.to(_UpperCAmelCase )
if hasattr(_UpperCAmelCase , 'set_default_attn_processor' ):
model.set_default_attn_processor()
snake_case__ =self.get_dummy_seed_input()
with torch.no_grad():
snake_case__ =model(**_UpperCAmelCase )[0]
snake_case__ =output[0, :5].flatten().cpu()
print(_UpperCAmelCase )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
snake_case__ =torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] )
self.assertTrue(torch_all_close(_UpperCAmelCase , _UpperCAmelCase , rtol=1E-2 ) )
@slow
class a__( unittest.TestCase ):
def _lowercase ( self , _UpperCAmelCase=1 , _UpperCAmelCase=768 , _UpperCAmelCase=77 , _UpperCAmelCase=0 ) -> Optional[Any]:
torch.manual_seed(_UpperCAmelCase )
snake_case__ =batch_size
snake_case__ =embedding_dim
snake_case__ =num_embeddings
snake_case__ =torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
snake_case__ =torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
snake_case__ =torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _lowercase ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]],
[37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]],
# fmt: on
] )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
snake_case__ =PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' )
model.to(_UpperCAmelCase )
snake_case__ =self.get_dummy_seed_input(seed=_UpperCAmelCase )
with torch.no_grad():
snake_case__ =model(**_UpperCAmelCase )[0]
assert list(sample.shape ) == [1, 768]
snake_case__ =sample[0, :8].flatten().cpu()
print(_UpperCAmelCase )
snake_case__ =torch.tensor(_UpperCAmelCase )
assert torch_all_close(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 )
| 581 | 1 |
"""Brute-force attack on the Caesar cipher."""
import string


def decrypt(message: str) -> None:
    """Print the decryption of ``message`` for every possible key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
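# Companion sketch (my addition, not part of the original module): encryption
# is the same alphabet walk in the opposite direction, so decrypt(encrypt(m, k))
# shows the original message on the line for key #k.
def encrypt(message: str, key: int) -> str:
    translated = ""
    for symbol in message.upper():
        if symbol in string.ascii_uppercase:
            num = (string.ascii_uppercase.find(symbol) + key) % len(string.ascii_uppercase)
            translated += string.ascii_uppercase[num]
        else:
            translated += symbol
    return translated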
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 676 |
"""Sudoku solver using backtracking."""
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if ``n`` can be placed at (row, column) without a clash
    in the same row, column or 3x3 box."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Find the next empty cell (marked 0), scanning row by row."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking; return None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None


def print_solution(grid: Matrix) -> None:
    """Print the grid row by row."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
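# Quick illustration (my addition): ``is_safe`` rejects duplicates in the row,
# column and 3x3 box. For ``initial_grid`` above, placing 3 at (0, 1) clashes
# with the 3 already in row 0, while 1 has no clash:
# >>> is_safe(initial_grid, 0, 1, 3)
# False
# >>> is_safe(initial_grid, 0, 1, 1)
# True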
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 676 | 1 |
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate an Erdős–Rényi style random graph as an adjacency dict."""
    graph: dict = {i: [] for i in range(vertices_number)}
    # if probability is greater than or equal to 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is less than or equal to 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each pair of nodes, add an edge from u to v
    # if the randomly generated number is less than the probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Return a complete graph with ``vertices_number`` vertices."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
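# Usage sketch (my addition): with probability 1 every possible edge exists.
# >>> complete_graph(3)
# {0: [1, 2], 1: [0, 2], 2: [0, 1]}
# >>> random.seed(1); random_graph(4, 0.5)   # reproducible for a fixed seed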
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716 |
import itertools
from dataclasses import dataclass
from typing import List, Optional

import pyarrow as pa
import pyarrow.parquet as pq

import datasets
from datasets.table import table_cast

logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
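# Standalone sketch (my addition, not part of the builder) of the batched read
# that `_generate_tables` performs; "data.parquet" is a hypothetical local file.
if __name__ == "__main__":
    demo_file = pq.ParquetFile("data.parquet")
    for demo_batch in demo_file.iter_batches(batch_size=10_000):
        demo_table = pa.Table.from_batches([demo_batch])
        print(demo_table.num_rows)  # at most 10_000 rows per batch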
| 437 | 0 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
__lowerCamelCase : Optional[int] = logging.getLogger()
__lowerCamelCase : Optional[int] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
        --data_dir {data_dir} \
        --output_dir {output_dir} \
        --model_name_or_path facebook/rag-sequence-base \
        --model_type rag_sequence \
        --do_train \
        --do_predict \
        --n_val -1 \
        --val_check_interval 1.0 \
        --train_batch_size 2 \
        --eval_batch_size 1 \
        --max_source_length 25 \
        --max_target_length 25 \
        --val_max_target_length 25 \
        --test_max_target_length 25 \
        --label_smoothing 0.1 \
        --dropout 0.1 \
        --attention_dropout 0.1 \
        --weight_decay 0.001 \
        --adam_epsilon 1e-08 \
        --max_grad_norm 0.1 \
        --lr_scheduler polynomial \
        --learning_rate 3e-04 \
        --num_train_epochs 1 \
        --warmup_steps 4 \
        --gradient_accumulation_steps 1 \
        --distributed-port 8787 \
        --use_dummy_dataset 1 \
        --distributed_retriever {distributed_retriever} \
        """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retriever(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retriever(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 629 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class A__ :
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=64 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ):
'''simple docstring'''
UpperCamelCase : Dict = parent
UpperCamelCase : int = batch_size
UpperCamelCase : Dict = seq_length
UpperCamelCase : Union[str, Any] = is_training
UpperCamelCase : int = use_input_mask
UpperCamelCase : Optional[Any] = use_token_type_ids
UpperCamelCase : Optional[Any] = use_labels
UpperCamelCase : str = vocab_size
UpperCamelCase : Union[str, Any] = hidden_size
UpperCamelCase : Any = embedding_size
UpperCamelCase : List[Any] = num_hidden_layers
UpperCamelCase : Optional[int] = num_attention_heads
UpperCamelCase : Optional[Any] = intermediate_size
UpperCamelCase : Dict = hidden_act
UpperCamelCase : Union[str, Any] = hidden_dropout_prob
UpperCamelCase : Optional[Any] = attention_probs_dropout_prob
UpperCamelCase : List[str] = max_position_embeddings
UpperCamelCase : List[Any] = type_vocab_size
UpperCamelCase : Any = type_sequence_label_size
UpperCamelCase : Optional[int] = initializer_range
UpperCamelCase : str = num_labels
UpperCamelCase : List[str] = num_choices
UpperCamelCase : Tuple = scope
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase : List[str] = None
if self.use_input_mask:
UpperCamelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase : Tuple = None
if self.use_token_type_ids:
UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase : Dict = None
UpperCamelCase : Union[str, Any] = None
UpperCamelCase : Dict = None
if self.use_labels:
UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase( self ):
'''simple docstring'''
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[int] = MegatronBertModel(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : Optional[Any] = model(A_ , attention_mask=A_ , token_type_ids=A_ )
UpperCamelCase : Dict = model(A_ , token_type_ids=A_ )
UpperCamelCase : Union[str, Any] = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : List[str] = MegatronBertForMaskedLM(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : Optional[int] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : int = MegatronBertForCausalLM(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : Dict = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : int = MegatronBertForNextSentencePrediction(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : Dict = model(
A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[int] = MegatronBertForPreTraining(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : str = model(
A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , next_sentence_label=A_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : str = MegatronBertForQuestionAnswering(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : Union[str, Any] = model(
A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : List[str] = self.num_labels
UpperCamelCase : Optional[int] = MegatronBertForSequenceClassification(A_ )
model.to(A_ )
model.eval()
UpperCamelCase : List[Any] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.num_labels
UpperCamelCase : List[str] = MegatronBertForTokenClassification(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : List[str] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : int = self.num_choices
UpperCamelCase : int = MegatronBertForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase : Tuple = model(
A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.prepare_config_and_inputs()
(
(
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) ,
) : Any = config_and_inputs
UpperCamelCase : Optional[int] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class A__ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCAmelCase :Tuple = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
_UpperCAmelCase :Optional[Any] = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase :Optional[Any] = True
# test_resize_embeddings = False
_UpperCAmelCase :Optional[Any] = False
def __UpperCamelCase( self , A_ , A_ , A_=False ):
'''simple docstring'''
UpperCamelCase : Any = super()._prepare_for_class(A_ , A_ , return_labels=A_ )
if return_labels:
if model_class in get_values(A_ ):
UpperCamelCase : str = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=A_ )
UpperCamelCase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A_ )
return inputs_dict
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = MegatronBertModelTester(self )
UpperCamelCase : Optional[int] = ConfigTester(self , config_class=A_ , hidden_size=37 )
def __UpperCamelCase( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*A_ )
def A_ ( _lowerCAmelCase ) -> int:
return torch.tensor(
_lowerCAmelCase , dtype=torch.long , device=_lowerCAmelCase , )
__lowerCamelCase : Optional[int] = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
@slow
@unittest.skip("Model is not available." )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[str] = "nvidia/megatron-bert-uncased-345m"
if "MYDIR" in os.environ:
UpperCamelCase : Union[str, Any] = os.path.join(os.environ["MYDIR"] , A_ )
UpperCamelCase : List[Any] = MegatronBertModel.from_pretrained(A_ )
model.to(A_ )
model.half()
UpperCamelCase : Optional[Any] = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
with torch.no_grad():
UpperCamelCase : Tuple = model(A_ )[0]
UpperCamelCase : Dict = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape , A_ )
UpperCamelCase : Union[str, Any] = [-0.60_40, -0.25_17, -0.10_25, 0.34_20, -0.67_58, -0.00_17, -0.10_89, -0.19_90, 0.57_28]
for ii in range(3 ):
for jj in range(3 ):
UpperCamelCase : List[str] = output[0, ii, jj]
UpperCamelCase : List[Any] = expected[3 * ii + jj]
UpperCamelCase : Optional[Any] = "ii={} jj={} a={} b={}".format(A_ , A_ , A_ , A_ )
self.assertTrue(math.isclose(A_ , A_ , rel_tol=A_ , abs_tol=A_ ) , msg=A_ )
| 629 | 1 |
from collections.abc import Iterable
from typing import Generic, TypeVar

_T = TypeVar("_T")


class QueueByTwoStacks(Generic[_T]):
    """A FIFO queue built from two LIFO stacks."""

    def __init__(self, iterable: Iterable[_T] | None = None):
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        # refill the output stack only when it is empty, reversing the order
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()
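# Amortised-cost note (my addition): each element crosses from one stack to the
# other at most once, so a sequence of put/get calls costs O(1) per operation
# on average.
# >>> q = QueueByTwoStacks([1, 2])
# >>> q.put(3)
# >>> q.get()
# 1
# >>> len(q)
# 2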
if __name__ == "__main__":
from doctest import testmod
testmod()
| 81 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
@slow
@require_torch
def snake_case ( self : Any ):
lowercase__ : List[str] = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
lowercase__ : int = BertTokenizer.from_pretrained("bert-base-uncased" )
lowercase__ : str = bertabert.config.encoder.vocab_size
lowercase__ : List[str] = tokenizer.sep_token_id
lowercase__ : Optional[Any] = tokenizer.cls_token_id
lowercase__ : int = 128
lowercase__ : str = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
lowercase__ : Tuple = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
lowercase__ : Tuple = train_dataset.select(range(32 ) )
lowercase__ : Optional[int] = val_dataset.select(range(16 ) )
lowercase__ : int = 4
def _map_to_encoder_decoder_inputs(SCREAMING_SNAKE_CASE : Optional[Any] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
lowercase__ : List[Any] = tokenizer(batch["article"] , padding="max_length" , truncation=SCREAMING_SNAKE_CASE , max_length=512 )
lowercase__ : Dict = tokenizer(batch["highlights"] , padding="max_length" , truncation=SCREAMING_SNAKE_CASE , max_length=128 )
lowercase__ : Tuple = inputs.input_ids
lowercase__ : Optional[int] = inputs.attention_mask
lowercase__ : int = outputs.input_ids
lowercase__ : Dict = outputs.input_ids.copy()
lowercase__ : int = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
lowercase__ : List[Any] = outputs.attention_mask
assert all(len(SCREAMING_SNAKE_CASE ) == 512 for x in inputs.input_ids )
assert all(len(SCREAMING_SNAKE_CASE ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(SCREAMING_SNAKE_CASE : List[str] ):
lowercase__ : Union[str, Any] = pred.label_ids
lowercase__ : Dict = pred.predictions
# all unnecessary tokens are removed
lowercase__ : List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
lowercase__ : str = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = sum([int(pred_str[i] == label_str[i] ) for i in range(len(SCREAMING_SNAKE_CASE ) )] ) / len(SCREAMING_SNAKE_CASE )
return {"accuracy": accuracy}
# map train dataset
lowercase__ : List[str] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , remove_columns=["article", "highlights"] , )
train_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
# same for validation dataset
lowercase__ : Any = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , remove_columns=["article", "highlights"] , )
val_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
lowercase__ : List[str] = self.get_auto_remove_tmp_dir()
lowercase__ : int = SeqaSeqTrainingArguments(
output_dir=SCREAMING_SNAKE_CASE , per_device_train_batch_size=SCREAMING_SNAKE_CASE , per_device_eval_batch_size=SCREAMING_SNAKE_CASE , predict_with_generate=SCREAMING_SNAKE_CASE , evaluation_strategy="steps" , do_train=SCREAMING_SNAKE_CASE , do_eval=SCREAMING_SNAKE_CASE , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
lowercase__ : str = SeqaSeqTrainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , compute_metrics=_compute_metrics , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , )
# start training
trainer.train()
| 81 | 1 |
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of ``function`` starting from ``starting_point``."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 62 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self :List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase__ : Tuple =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
lowerCamelCase__ : Any =self.dummy_uncond_unet
lowerCamelCase__ : Any =KarrasVeScheduler()
lowerCamelCase__ : List[str] =KarrasVePipeline(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] =torch.manual_seed(0 )
lowerCamelCase__ : Any =pipe(num_inference_steps=2 , generator=lowerCamelCase_ , output_type='numpy' ).images
lowerCamelCase__ : Optional[Any] =torch.manual_seed(0 )
lowerCamelCase__ : str =pipe(num_inference_steps=2 , generator=lowerCamelCase_ , output_type='numpy' , return_dict=lowerCamelCase_ )[0]
lowerCamelCase__ : int =image[0, -3:, -3:, -1]
lowerCamelCase__ : Optional[Any] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase__ : Dict =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
lowerCamelCase__ : List[str] ='google/ncsnpp-celebahq-256'
lowerCamelCase__ : Dict =UNetaDModel.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : List[str] =KarrasVeScheduler()
lowerCamelCase__ : List[str] =KarrasVePipeline(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : List[str] =torch.manual_seed(0 )
lowerCamelCase__ : Tuple =pipe(num_inference_steps=20 , generator=lowerCamelCase_ , output_type='numpy' ).images
lowerCamelCase__ : Dict =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCamelCase__ : Optional[Any] =np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86] )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 174 | 0 |
"""simple docstring"""
import heapq
def _lowerCamelCase ( lowerCamelCase__ : dict ):
lowercase__ : list[list] = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(lowerCamelCase__ , [-1 * len(lowerCamelCase__ ), (key, value)] )
# chosen_vertices = set of chosen vertices
lowercase__ : Any = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
lowercase__ : Optional[Any] = heapq.heappop(lowerCamelCase__ )[1][0]
chosen_vertices.add(lowerCamelCase__ )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
lowercase__ : List[Any] = elem[1][1].index(lowerCamelCase__ )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(lowerCamelCase__ )
return chosen_vertices
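# Greedy trace (my addition) for the demo graph below: the degree-3 vertex 2 is
# popped first (ties break on the smaller vertex id), then 0, 1 and finally 4,
# giving the cover {0, 1, 2, 4}. Greedy covers every edge but is not guaranteed
# to return a minimum cover.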
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
| 128 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
"""simple docstring"""
@staticmethod
@abstractmethod
def UpperCAmelCase__( lowerCamelCase__ ) -> Optional[Any]:
raise NotImplementedError()
@abstractmethod
def UpperCAmelCase__( self ) -> Dict:
raise NotImplementedError() | 128 | 1 |
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowerCAmelCase__ = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray,
    output_size: Union[int, Iterable[int]],
    keep_aspect_ratio: bool,
    multiple: int,
) -> Tuple[int, int]:
    """Compute the target size, optionally preserving aspect ratio and
    snapping both sides to a multiple of ``multiple``."""

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
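# Worked example (my addition): resizing a 480x640 image to size 384x384 with
# keep_aspect_ratio=True and ensure_multiple_of=32: the per-side scales are
# 384/480 = 0.8 and 384/640 = 0.6; the code scales as little as possible, so
# both sides use 0.8, giving (round(384/32)*32, round(512/32)*32) = (384, 512).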
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = ['pixel_values']
def __init__( self : Tuple ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : PILImageResampling = PILImageResampling.BILINEAR ,lowercase__ : bool = False ,lowercase__ : int = 1 ,lowercase__ : bool = True ,lowercase__ : Union[int, float] = 1 / 2_5_5 ,lowercase__ : bool = True ,lowercase__ : Optional[Union[float, List[float]]] = None ,lowercase__ : Optional[Union[float, List[float]]] = None ,**lowercase__ : Dict ,):
super().__init__(**lowercase__ )
__lowercase = size if size is not None else {'''height''': 3_8_4, '''width''': 3_8_4}
__lowercase = get_size_dict(lowercase__ )
__lowercase = do_resize
__lowercase = size
__lowercase = keep_aspect_ratio
__lowercase = ensure_multiple_of
__lowercase = resample
__lowercase = do_rescale
__lowercase = rescale_factor
__lowercase = do_normalize
__lowercase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowercase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : np.ndarray ,lowercase__ : Dict[str, int] ,lowercase__ : bool = False ,lowercase__ : int = 1 ,lowercase__ : PILImageResampling = PILImageResampling.BICUBIC ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : Tuple ,):
__lowercase = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
__lowercase = get_resize_output_image_size(
lowercase__ ,output_size=(size['''height'''], size['''width''']) ,keep_aspect_ratio=lowercase__ ,multiple=lowercase__ ,)
return resize(lowercase__ ,size=lowercase__ ,resample=lowercase__ ,data_format=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : np.ndarray ,lowercase__ : Union[int, float] ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : Optional[Any] ,):
return rescale(lowercase__ ,scale=lowercase__ ,data_format=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : np.ndarray ,lowercase__ : Union[float, List[float]] ,lowercase__ : Union[float, List[float]] ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : Tuple ,):
return normalize(lowercase__ ,mean=lowercase__ ,std=lowercase__ ,data_format=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : ImageInput ,lowercase__ : bool = None ,lowercase__ : int = None ,lowercase__ : bool = None ,lowercase__ : int = None ,lowercase__ : PILImageResampling = None ,lowercase__ : bool = None ,lowercase__ : float = None ,lowercase__ : bool = None ,lowercase__ : Optional[Union[float, List[float]]] = None ,lowercase__ : Optional[Union[float, List[float]]] = None ,lowercase__ : Optional[Union[str, TensorType]] = None ,lowercase__ : ChannelDimension = ChannelDimension.FIRST ,**lowercase__ : str ,):
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = size if size is not None else self.size
__lowercase = get_size_dict(lowercase__ )
__lowercase = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
__lowercase = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
__lowercase = resample if resample is not None else self.resample
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = image_mean if image_mean is not None else self.image_mean
__lowercase = image_std if image_std is not None else self.image_std
__lowercase = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(lowercase__ ) for image in images]
if do_resize:
__lowercase = [self.resize(image=lowercase__ ,size=lowercase__ ,resample=lowercase__ ) for image in images]
if do_rescale:
__lowercase = [self.rescale(image=lowercase__ ,scale=lowercase__ ) for image in images]
if do_normalize:
__lowercase = [self.normalize(image=lowercase__ ,mean=lowercase__ ,std=lowercase__ ) for image in images]
__lowercase = [to_channel_dimension_format(lowercase__ ,lowercase__ ) for image in images]
__lowercase = {'''pixel_values''': images}
return BatchFeature(data=lowercase__ ,tensor_type=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Tuple ,lowercase__ : List[Tuple] = None ):
__lowercase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase__ ) != len(lowercase__ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(lowercase__ ):
__lowercase = target_sizes.numpy()
__lowercase = []
for idx in range(len(lowercase__ ) ):
__lowercase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='''bilinear''' ,align_corners=lowercase__ )
__lowercase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowercase__ )
else:
__lowercase = logits.argmax(dim=1 )
__lowercase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 41 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase__ )
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : List[str] ,**lowercase__ : Tuple ):
super().__init__(**lowercase__ )
if self.framework == "tf":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
requires_backends(self ,'''vision''' )
self.check_model_type(lowercase__ )
def __call__( self : List[str] ,lowercase__ : Union[str, "Image.Image", List[Dict[str, Any]]] ,lowercase__ : Union[str, List[str]] = None ,**lowercase__ : str ,):
if "text_queries" in kwargs:
__lowercase = kwargs.pop('''text_queries''' )
if isinstance(lowercase__ ,(str, Image.Image) ):
__lowercase = {'''image''': image, '''candidate_labels''': candidate_labels}
else:
__lowercase = image
__lowercase = super().__call__(lowercase__ ,**lowercase__ )
return results
def SCREAMING_SNAKE_CASE ( self : int ,**lowercase__ : List[Any] ):
__lowercase = {}
if "threshold" in kwargs:
__lowercase = kwargs['''threshold''']
if "top_k" in kwargs:
__lowercase = kwargs['''top_k''']
return {}, {}, postprocess_params
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Optional[Any] ):
__lowercase = load_image(inputs['''image'''] )
__lowercase = inputs['''candidate_labels''']
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = candidate_labels.split(''',''' )
__lowercase = torch.tensor([[image.height, image.width]] ,dtype=torch.intaa )
for i, candidate_label in enumerate(lowercase__ ):
__lowercase = self.tokenizer(lowercase__ ,return_tensors=self.framework )
__lowercase = self.image_processor(lowercase__ ,return_tensors=self.framework )
yield {
"is_last": i == len(lowercase__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ):
__lowercase = model_inputs.pop('''target_size''' )
__lowercase = model_inputs.pop('''candidate_label''' )
__lowercase = model_inputs.pop('''is_last''' )
__lowercase = self.model(**lowercase__ )
__lowercase = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
return model_outputs
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : List[Any]=0.1 ,lowercase__ : List[str]=None ):
__lowercase = []
for model_output in model_outputs:
__lowercase = model_output['''candidate_label''']
__lowercase = BaseModelOutput(lowercase__ )
__lowercase = self.image_processor.post_process_object_detection(
outputs=lowercase__ ,threshold=lowercase__ ,target_sizes=model_output['''target_size'''] )[0]
for index in outputs["scores"].nonzero():
__lowercase = outputs['''scores'''][index].item()
__lowercase = self._get_bounding_box(outputs['''boxes'''][index][0] )
__lowercase = {'''score''': score, '''label''': label, '''box''': box}
results.append(lowercase__ )
__lowercase = sorted(lowercase__ ,key=lambda lowercase__ : x["score"] ,reverse=lowercase__ )
if top_k:
__lowercase = results[:top_k]
return results
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' )
__lowercase , __lowercase , __lowercase , __lowercase = box.int().tolist()
__lowercase = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
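# Usage sketch (my addition; the checkpoint and image URL are the usual
# documentation examples, not requirements of this pipeline class):
#
#   from transformers import pipeline
#   detector = pipeline(task="zero-shot-object-detection", model="google/owlvit-base-patch32")
#   predictions = detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote control"],
#   )
#   # each prediction: {"score": float, "label": str,
#   #                   "box": {"xmin": int, "ymin": int, "xmax": int, "ymax": int}}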
| 41 | 1 |
from __future__ import annotations

sieve = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if sieve[i]:
        for j in range(i * i, 1_000_001, i):
            sieve[j] = False
    i += 1


def is_prime(n: int) -> bool:
    """For 0 <= n <= 1_000_000, look the answer up in the precomputed sieve."""
    return sieve[n]


def contains_an_even_digit(n: int) -> bool:
    """Return True if ``n`` contains an even digit (apart from 2, such numbers
    cannot be circular primes, since some rotation ends in that digit)."""
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    """Return all circular primes below ``limit``."""
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def count_circular_primes() -> int:
    return len(find_circular_primes())
if __name__ == "__main__":
print(f"{len(find_circular_primes()) = }")
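# Worked example (added for clarity, using the helpers above): 197 is a
# circular prime because every rotation of its digits is prime, while any
# multi-digit number containing an even digit is rejected up front, since some
# rotation of it would end in that digit and be divisible by 2.
#
#   >>> is_prime(197) and is_prime(971) and is_prime(719)
#   True
#   >>> contains_an_even_digit(102)
#   True
#   >>> 197 in find_circular_primes()
#   True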
| 712 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
a_ = logging.getLogger(__name__)
def accuracy(out ,labels ):
    outputs = np.argmax(out ,axis=1 )
    return np.sum(outputs == labels )
def load_rocstories_dataset(dataset_path ):
    with open(dataset_path ,encoding='''utf_8''' ) as f:
        f = csv.reader(f )
        output = []
        next(f )  # skip the first line
        for line in tqdm(f ):
            output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output
def pre_process_datasets(encoded_datasets ,input_len ,cap_length ,start_token ,delimiter_token ,clf_token ):
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset )
        input_ids = np.zeros((n_batch, 2, input_len) ,dtype=np.int64 )
        mc_token_ids = np.zeros((n_batch, 2) ,dtype=np.int64 )
        lm_labels = np.full((n_batch, 2, input_len) ,fill_value=-100 ,dtype=np.int64 )
        mc_labels = np.zeros((n_batch,) ,dtype=np.int64 )
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset ):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1 )] = with_cont1
            input_ids[i, 1, : len(with_cont2 )] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1 ) - 1
            mc_token_ids[i, 1] = len(with_cont2 ) - 1
            lm_labels[i, 0, : len(with_cont1 )] = with_cont1
            lm_labels[i, 1, : len(with_cont2 )] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t ) for t in all_inputs ) )
    return tensor_datasets
return tensor_datasets
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_name''' ,type=str ,default='''openai-gpt''' ,help='''pretrained model name''' )
    parser.add_argument('''--do_train''' ,action='''store_true''' ,help='''Whether to run training.''' )
    parser.add_argument('''--do_eval''' ,action='''store_true''' ,help='''Whether to run eval on the dev set.''' )
    parser.add_argument(
        '''--output_dir''' ,default=None ,type=str ,required=True ,help='''The output directory where the model predictions and checkpoints will be written.''' ,)
    parser.add_argument('''--train_dataset''' ,type=str ,default='''''' )
    parser.add_argument('''--eval_dataset''' ,type=str ,default='''''' )
    parser.add_argument('''--seed''' ,type=int ,default=42 )
    parser.add_argument('''--num_train_epochs''' ,type=int ,default=3 )
    parser.add_argument('''--train_batch_size''' ,type=int ,default=8 )
    parser.add_argument('''--eval_batch_size''' ,type=int ,default=16 )
    parser.add_argument('''--adam_epsilon''' ,default=1e-8 ,type=float ,help='''Epsilon for Adam optimizer.''' )
    parser.add_argument('''--max_grad_norm''' ,type=int ,default=1 )
    parser.add_argument(
        '''--max_steps''' ,default=-1 ,type=int ,help=(
            '''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
        ) ,)
    parser.add_argument(
        '''--gradient_accumulation_steps''' ,type=int ,default=1 ,help='''Number of updates steps to accumulate before performing a backward/update pass.''' ,)
    parser.add_argument('''--learning_rate''' ,type=float ,default=6.25e-5 )
    parser.add_argument('''--warmup_steps''' ,default=0 ,type=int ,help='''Linear warmup over warmup_steps.''' )
    parser.add_argument('''--lr_schedule''' ,type=str ,default='''warmup_linear''' )
    parser.add_argument('''--weight_decay''' ,type=float ,default=0.01 )
    parser.add_argument('''--lm_coef''' ,type=float ,default=0.9 )
    parser.add_argument('''--n_valid''' ,type=int ,default=374 )
    parser.add_argument('''--server_ip''' ,type=str ,default='''''' ,help='''Can be used for distant debugging.''' )
    parser.add_argument('''--server_port''' ,type=str ,default='''''' ,help='''Can be used for distant debugging.''' )
    args = parser.parse_args()
    print(args )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) ,redirect_output=True )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
    n_gpu = torch.cuda.device_count()
    logger.info('''device: {}, n_gpu {}'''.format(device ,n_gpu ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ['''_start_''', '''_delimiter_''', '''_classify_''']
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name )
    tokenizer.add_tokens(special_tokens )
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens )
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
    model.resize_token_embeddings(len(tokenizer ) )
    model.to(device )
# Load and encode the datasets
    def tokenize_and_encode(obj ):
        if isinstance(obj ,str ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj ) )
        elif isinstance(obj ,int ):
            return obj
        return [tokenize_and_encode(o ) for o in obj]
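    # Example (added): tokenize_and_encode recurses through nested tuples and
    # lists, converting every string to token ids and leaving the integer
    # labels untouched, e.g. (["some story"], 1) -> ([[ids, ...]], 1).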
logger.info('''Encoding dataset...''' )
    train_dataset = load_rocstories_dataset(args.train_dataset )
    eval_dataset = load_rocstories_dataset(args.eval_dataset )
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets )
    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length] ) + max(len(cont1[:max_length] ) ,len(cont2[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset )
    input_length = min(input_length ,model.config.n_positions )  # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets ,input_length ,max_length ,*special_tokens_ids )
    train_tensor_dataset ,eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    train_dataloader = DataLoader(train_data ,sampler=train_sampler ,batch_size=args.train_batch_size )
    eval_data = TensorDataset(*eval_tensor_dataset )
    eval_sampler = SequentialSampler(eval_data )
    eval_dataloader = DataLoader(eval_data ,sampler=eval_sampler ,batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader ) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader ) // args.gradient_accumulation_steps * args.num_train_epochs
        param_optimizer = list(model.named_parameters() )
        no_decay = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
        optimizer_grouped_parameters = [
            {
                '''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
                '''weight_decay''': args.weight_decay,
            },
            {'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters ,lr=args.learning_rate ,eps=args.adam_epsilon )
        scheduler = get_linear_schedule_with_warmup(
            optimizer ,num_warmup_steps=args.warmup_steps ,num_training_steps=t_total )
if args.do_train:
        nb_tr_steps ,tr_loss ,exp_average_loss = 0 ,0 ,None
        model.train()
        for _ in trange(int(args.num_train_epochs ) ,desc='''Epoch''' ):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader ,desc='''Training''' )
            for step, batch in enumerate(tqdm_bar ):
                batch = tuple(t.to(device ) for t in batch )
                input_ids ,mc_token_ids ,lm_labels ,mc_labels = batch
                losses = model(input_ids ,mc_token_ids=mc_token_ids ,lm_labels=lm_labels ,mc_labels=mc_labels )
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = '''Training loss: {:.2e} lr: {:.2e}'''.format(exp_average_loss ,scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model ,'''module''' ) else model  # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir ,WEIGHTS_NAME )
        output_config_file = os.path.join(args.output_dir ,CONFIG_NAME )
        torch.save(model_to_save.state_dict() ,output_model_file )
        model_to_save.config.to_json_file(output_config_file )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
        model.to(device )
if args.do_eval:
model.eval()
        eval_loss ,eval_accuracy = 0 ,0
        nb_eval_steps ,nb_eval_examples = 0 ,0
        for batch in tqdm(eval_dataloader ,desc='''Evaluating''' ):
            batch = tuple(t.to(device ) for t in batch )
            input_ids ,mc_token_ids ,lm_labels ,mc_labels = batch
            with torch.no_grad():
                _ ,mc_loss ,_ ,mc_logits = model(
                    input_ids ,mc_token_ids=mc_token_ids ,lm_labels=lm_labels ,mc_labels=mc_labels )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to('''cpu''' ).numpy()
            tmp_eval_accuracy = accuracy(mc_logits ,mc_labels )
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0 )
            nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
        output_eval_file = os.path.join(args.output_dir ,'''eval_results.txt''' )
        with open(output_eval_file ,'''w''' ) as writer:
            logger.info('''***** Eval results *****''' )
            for key in sorted(result.keys() ):
                logger.info(''' %s = %s''' ,key ,str(result[key] ) )
                writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
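# Example invocation (added; the dataset paths are placeholders for the
# RocStories cloze-test CSV files):
#
#   python run_openai_gpt.py \
#     --model_name openai-gpt \
#     --do_train --do_eval \
#     --train_dataset "$ROC_STORIES_DIR/cloze_test_val__spring2016 - cloze_test_ALL_val.csv" \
#     --eval_dataset "$ROC_STORIES_DIR/cloze_test_test__spring2016 - cloze_test_ALL_test.csv" \
#     --output_dir ../log \
#     --train_batch_size 16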
| 622 | 0 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
"""simple docstring"""
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ) -> Dict:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def __UpperCAmelCase ( self : int ) -> List[str]:
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            choice_labels = ids_tensor([self.batch_size], self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
        return OpenLlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True, )
    def __UpperCAmelCase ( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ) -> int:
        model = OpenLlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
    def __UpperCAmelCase ( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) -> List[Any]:
        config.add_cross_attention = True
        model = OpenLlamaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            config and input_ids if False else input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
    def __UpperCAmelCase ( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) -> Any:
        model = OpenLlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
    def __UpperCAmelCase ( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) -> List[Any]:
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1 )
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )['hidden_states'][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3 ) )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin ,GenerationTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
        self.model_tester = OpenLlamaModelTester(self )
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37 )
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Optional[int] ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def __UpperCAmelCase ( self : Any ) -> List[str]:
pass
@parameterized.expand([('linear',), ('dynamic',)] )
    def __UpperCAmelCase ( self : Union[str, Any], scaling_type : Any ) -> List[str]:
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = OpenLlamaModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output ,scaled_short_output ,atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output ,scaled_short_output ,atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output ,scaled_long_output ,atol=1e-5 ) )
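# Note (added): the parameterized test above exercises RoPE scaling through a
# config field along the lines of
#
#   config.rope_scaling = {"type": "linear", "factor": 10.0}  # or "dynamic"
#
# "linear" rescales position ids at every input length, so even short inputs
# change; "dynamic" only kicks in past the original max_position_embeddings,
# which is why the short-input outputs still match in the dynamic branch.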
| 107 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _snake_case ( _A , unittest.TestCase ):
_A = DDIMPipeline
_A = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
_A = PipelineTesterMixin.required_optional_params - {
'num_images_per_prompt',
'latents',
'callback',
'callback_steps',
}
_A = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
_A = False
def lowerCAmelCase_ ( self ) -> Any:
torch.manual_seed(0 )
        unet = UNetaDModel(
            block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("DownBlock2D", "AttnDownBlock2D") ,up_block_types=("AttnUpBlock2D", "UpBlock2D") ,)
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components
    def lowerCAmelCase_ ( self ,device ,seed=0 ) -> Optional[Any]:
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
return inputs
def lowerCAmelCase_ ( self ) -> int:
snake_case__ :Tuple = "cpu"
snake_case__ :int = self.get_dummy_components()
snake_case__ :str = self.pipeline_class(**UpperCamelCase )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ :Optional[Any] = self.get_dummy_inputs(UpperCamelCase )
snake_case__ :Dict = pipe(**UpperCamelCase ).images
snake_case__ :Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape ,(1, 32, 32, 3) )
snake_case__ :Tuple = np.array(
[1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] )
snake_case__ :Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase ,1E-3 )
def lowerCAmelCase_ ( self ) -> Any:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def lowerCAmelCase_ ( self ) -> List[Any]:
super().test_save_load_local(expected_max_difference=3E-3 )
def lowerCAmelCase_ ( self ) -> Tuple:
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def lowerCAmelCase_ ( self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :Optional[int] = "google/ddpm-cifar10-32"
snake_case__ :int = UNetaDModel.from_pretrained(UpperCamelCase )
snake_case__ :List[str] = DDIMScheduler()
snake_case__ :Any = DDIMPipeline(unet=UpperCamelCase ,scheduler=UpperCamelCase )
ddim.to(UpperCamelCase )
ddim.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ :Dict = torch.manual_seed(0 )
snake_case__ :Optional[Any] = ddim(generator=UpperCamelCase ,eta=0.0 ,output_type="numpy" ).images
snake_case__ :int = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case__ :str = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :int = "google/ddpm-ema-bedroom-256"
snake_case__ :Tuple = UNetaDModel.from_pretrained(UpperCamelCase )
snake_case__ :int = DDIMScheduler.from_pretrained(UpperCamelCase )
snake_case__ :Union[str, Any] = DDIMPipeline(unet=UpperCamelCase ,scheduler=UpperCamelCase )
ddpm.to(UpperCamelCase )
ddpm.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ :int = torch.manual_seed(0 )
snake_case__ :Optional[int] = ddpm(generator=UpperCamelCase ,output_type="numpy" ).images
snake_case__ :Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
snake_case__ :Optional[int] = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 241 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : str = 1
snake_case : Union[str, Any] = 3
snake_case : Any = (3_2, 3_2)
snake_case : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A )
return image
@property
def UpperCAmelCase ( self ) -> Dict:
torch.manual_seed(0 )
snake_case : Tuple = UNetaDConditionModel(
block_out_channels=(3_2, 3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , attention_head_dim=8 , use_linear_projection=A , only_cross_attention=(True, True, False) , num_class_embeds=1_0_0 , )
return model
@property
def UpperCAmelCase ( self ) -> str:
torch.manual_seed(0 )
snake_case : Tuple = AutoencoderKL(
block_out_channels=[3_2, 3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def UpperCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
snake_case : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=5_1_2 , )
return CLIPTextModel(A )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case : Any = self.dummy_cond_unet_upscale
snake_case : Optional[int] = DDPMScheduler()
snake_case : Optional[int] = DDIMScheduler(prediction_type="""v_prediction""" )
snake_case : Optional[Any] = self.dummy_vae
snake_case : str = self.dummy_text_encoder
snake_case : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case : Tuple = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case : Any = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ).resize((6_4, 6_4) )
# make sure here that pndm scheduler skips prk
snake_case : Dict = StableDiffusionUpscalePipeline(
unet=A , low_res_scheduler=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , max_noise_level=3_5_0 , )
snake_case : Union[str, Any] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
snake_case : Dict = """A painting of a squirrel eating a burger"""
snake_case : Optional[Any] = torch.Generator(device=A ).manual_seed(0 )
snake_case : Any = sd_pipe(
[prompt] , image=A , generator=A , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="""np""" , )
snake_case : Union[str, Any] = output.images
snake_case : Union[str, Any] = torch.Generator(device=A ).manual_seed(0 )
snake_case : List[str] = sd_pipe(
[prompt] , image=A , generator=A , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="""np""" , return_dict=A , )[0]
snake_case : str = image[0, -3:, -3:, -1]
snake_case : Any = image_from_tuple[0, -3:, -3:, -1]
snake_case : Optional[Any] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
snake_case : Union[str, Any] = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase ( self ) -> Dict:
snake_case : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case : Dict = self.dummy_cond_unet_upscale
snake_case : Any = DDPMScheduler()
snake_case : Union[str, Any] = DDIMScheduler(prediction_type="""v_prediction""" )
snake_case : List[str] = self.dummy_vae
snake_case : List[Any] = self.dummy_text_encoder
snake_case : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case : str = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case : Optional[Any] = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ).resize((6_4, 6_4) )
# make sure here that pndm scheduler skips prk
snake_case : Union[str, Any] = StableDiffusionUpscalePipeline(
unet=A , low_res_scheduler=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , max_noise_level=3_5_0 , )
snake_case : Dict = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
snake_case : List[str] = """A painting of a squirrel eating a burger"""
snake_case : str = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="""np""" , )
snake_case : Union[str, Any] = output.images
assert image.shape[0] == 2
snake_case : Optional[int] = torch.Generator(device=A ).manual_seed(0 )
snake_case : Tuple = sd_pipe(
[prompt] , image=A , generator=A , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="""np""" , )
snake_case : Tuple = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCAmelCase ( self ) -> str:
snake_case : str = self.dummy_cond_unet_upscale
snake_case : Tuple = DDPMScheduler()
snake_case : str = DDIMScheduler(prediction_type="""v_prediction""" )
snake_case : str = self.dummy_vae
snake_case : List[Any] = self.dummy_text_encoder
snake_case : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case : Optional[int] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case : Optional[Any] = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ).resize((6_4, 6_4) )
# put models in fp16, except vae as it overflows in fp16
snake_case : Any = unet.half()
snake_case : List[str] = text_encoder.half()
# make sure here that pndm scheduler skips prk
snake_case : Union[str, Any] = StableDiffusionUpscalePipeline(
unet=A , low_res_scheduler=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , max_noise_level=3_5_0 , )
snake_case : Dict = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
snake_case : Optional[int] = """A painting of a squirrel eating a burger"""
snake_case : Dict = torch.manual_seed(0 )
snake_case : Tuple = sd_pipe(
[prompt] , image=A , generator=A , num_inference_steps=2 , output_type="""np""" , ).images
snake_case : Optional[Any] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
snake_case : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat.npy""" )
snake_case : Optional[int] = """stabilityai/stable-diffusion-x4-upscaler"""
snake_case : Union[str, Any] = StableDiffusionUpscalePipeline.from_pretrained(A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
snake_case : Any = """a cat sitting on a park bench"""
snake_case : Optional[Any] = torch.manual_seed(0 )
snake_case : str = pipe(
prompt=A , image=A , generator=A , output_type="""np""" , )
snake_case : List[Any] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def UpperCAmelCase ( self ) -> Dict:
snake_case : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
snake_case : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""" )
snake_case : Union[str, Any] = """stabilityai/stable-diffusion-x4-upscaler"""
snake_case : int = StableDiffusionUpscalePipeline.from_pretrained(
A , torch_dtype=torch.floataa , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
snake_case : int = """a cat sitting on a park bench"""
snake_case : Union[str, Any] = torch.manual_seed(0 )
snake_case : List[str] = pipe(
prompt=A , image=A , generator=A , output_type="""np""" , )
snake_case : Union[str, Any] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def UpperCAmelCase ( self ) -> Dict:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
snake_case : int = """stabilityai/stable-diffusion-x4-upscaler"""
snake_case : Any = StableDiffusionUpscalePipeline.from_pretrained(
A , torch_dtype=torch.floataa , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case : List[Any] = """a cat sitting on a park bench"""
snake_case : Optional[Any] = torch.manual_seed(0 )
snake_case : List[Any] = pipe(
prompt=A , image=A , generator=A , num_inference_steps=5 , output_type="""np""" , )
snake_case : Any = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 1_0**9
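# Minimal usage sketch for the pipeline exercised above (added; the model id is
# the one the slow tests load, and `low_res_image` is any small PIL image):
#
#   import torch
#   from diffusers import StableDiffusionUpscalePipeline
#
#   pipe = StableDiffusionUpscalePipeline.from_pretrained(
#       "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
#   ).to("cuda")
#   upscaled = pipe(prompt="a cat sitting on a park bench", image=low_res_image).images[0]
#   # the output resolution is 4x the input, matching the `* 4` assertions above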
| 684 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_mae'] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vit_mae'] = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
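# Note (added): with the _LazyModule pattern above,
#
#   from transformers.models.vit_mae import ViTMAEConfig
#
# succeeds even without torch or TensorFlow installed; the heavy modeling
# modules are only imported the first time one of their attributes is accessed.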
| 684 | 1 |
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int] ,burst_time: list[int] ,no_of_processes: int ) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    ready_process = []
    completed = 0
    total_time = 0
    # While processes are not completed:
    # a process whose arrival time has passed and which has remaining
    # execution time is put into ready_process, and the shortest process
    # in ready_process (target_process) is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes ):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i )
        if len(ready_process ) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int] ,no_of_processes: int ,waiting_time: list[int] ) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print('[TEST CASE 01]')
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
for i, process_id in enumerate(list(range(1, 5))):
print(
f'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
f'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(f'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(f'''Average turnaround time = {mean(turn_around_time):.5f}''')
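# Worked trace of test case 01 (added): with all arrivals at t=0 and burst
# times [2, 5, 3, 7], jobs run shortest-first as P1(2) -> P3(3) -> P2(5) ->
# P4(7), giving waiting times [0, 5, 2, 10] (mean 4.25) and turnaround times
# [2, 10, 5, 17] (mean 8.5):
#
#   >>> calculate_waitingtime([0, 0, 0, 0], [2, 5, 3, 7], 4)
#   [0, 5, 2, 10]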
| 99 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
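# Migration sketch (added; the checkpoint name is an illustrative example, not
# mandated by this deprecation notice):
#
#   from diffusers import StableDiffusionInpaintPipeline
#
#   pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
#   result = pipe(prompt="a red bench", image=init_image, mask_image=mask_image).images[0]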
| 155 | 0 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class lowerCamelCase__ ( UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : List[str] = FlaxAutoencoderKL
@property
def snake_case__ ( self ):
'''simple docstring'''
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )
        return {"sample": image, "prng_key": prng_key}
def snake_case__ ( self ):
'''simple docstring'''
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 185 |
def combination_util(arr ,n ,r ,index ,data ,i ):
    if index == r:
        for j in range(r ):
            print(data[j] , end=" " )
        print(" " )
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr ,n ,r ,index + 1 ,data ,i + 1 )
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr ,n ,r ,index ,data ,i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr ,n ,r ):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr ,n ,r ,0 ,data ,0 )
if __name__ == "__main__":
# Driver code to check the function above
__UpperCamelCase = [1_0, 2_0, 3_0, 4_0, 5_0]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
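# Expected output (added for clarity): all C(5, 3) = 10 combinations of size 3,
# one per line, in lexicographic order:
#   10 20 30
#   10 20 40
#   ...
#   30 40 50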
| 185 | 1 |
__version__ = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
    from .pipelines import MidiProcessor
| 57 |
def prefix_function(input_string: str ) -> list:
    prefix_result = [0] * len(input_string )
    for i in range(1 , len(input_string ) ):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix(input_str: str ) -> int:
    return max(prefix_function(input_str ) )
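# Worked example (added): for "aabcdaabc" the prefix function is
# [0, 1, 0, 0, 0, 1, 2, 3, 4]; the final 4 says the 4-character prefix "aabc"
# is also a suffix, and longest_prefix takes the maximum over all positions.
#
#   >>> prefix_function("aabcdaabc")
#   [0, 1, 0, 0, 0, 1, 2, 3, 4]
#   >>> longest_prefix("aabcdaabc")
#   4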
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 67 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_herbert_fast"""] = ["""HerbertTokenizerFast"""]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 218 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__UpperCAmelCase = """platform"""
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config ,input_ids ,decoder_input_ids=None ,attention_mask=None ,decoder_attention_mask=None ,head_mask=None ,decoder_head_mask=None ,cross_attn_head_mask=None ,):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
    def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_labels=False ,vocab_size=99 ,hidden_size=16 ,num_hidden_layers=2 ,num_attention_heads=4 ,intermediate_size=4 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=32 ,eos_token_id=2 ,pad_token_id=1 ,bos_token_id=0 ,initializer_range=0.02 ,) -> Tuple:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
"""simple docstring"""
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
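    # (Explanatory note, added; not from the original suite.) This exercises Flax incremental
    # decoding: init_cache allocates key/value buffers for max_decoder_length positions, the
    # first decode call fills all but the last position, and the second call decodes a single
    # token against the cached states. Its logits must match a full-sequence forward pass to
    # within 1e-3, which is exactly what the final assertion checks.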
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1, )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    """simple docstring"""
    vocab_size = 99
    def _get_config_and_data(self):
        """simple docstring"""
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ], dtype=np.int64, )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
return config, input_ids, batch_size
    def test_lm_forward(self):
        """simple docstring"""
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        """simple docstring"""
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        """simple docstring"""
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
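    # Worked example (added; assumed shift_tokens_right semantics for the flax BART family):
    #   row 0 of input_ids : [71, 82, 18, 33, 2, 1, 1]   (eos=2, pad=1)
    #   row 0 of shifted   : [ 2, 71, 82, 18, 33, 2, 1]
    # i.e. the row is rotated right by one and decoder_start_token_id=2 overwrites position 0,
    # consuming one trailing pad slot -- hence n_pad_after == n_pad_before - 1.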
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
"""simple docstring"""
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        """simple docstring"""
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)
    def test_use_cache_forward_with_attn_mask(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)
                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)
                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, )
                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""" )
@slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        """simple docstring"""
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")
        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 218 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
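# Worked example (added for clarity): with scale_factor=8 the function rounds each side up to
# a multiple of scale_factor**2 = 64 and then divides by 8, so
#   downscale_height_and_width(768, 768)  -> (96, 96)   (768 is already a multiple of 64)
#   downscale_height_and_width(300, 300)  -> (40, 40)   (300 -> ceil(300/64) = 5 blocks -> 5*8)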
def prepare_image(pil_image, w=512, h=512):
    '''simple docstring'''
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
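# (Added note.) The arr / 127.5 - 1 step maps 8-bit pixel values [0, 255] onto the [-1, 1]
# range the VQ encoder expects: 0 -> -1.0, 127.5 -> 0.0, 255 -> 1.0. The transpose turns the
# HWC numpy layout into the CHW tensor layout, and unsqueeze(0) adds the batch dimension.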
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        '''simple docstring'''
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
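    # Worked example (added): with num_inference_steps=100 and strength=0.2,
    # init_timestep = min(20, 100) = 20 and t_start = 80, so only the last 20 scheduler
    # timesteps are run -- low strength keeps the output close to the init image, while
    # strength=1.0 would denoise from scratch over all 100 steps.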
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        '''simple docstring'''
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}""" )
        image = image.to(device=device, dtype=dtype)
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"""You have passed a list of generators of length {len(generator)}, but requested an effective batch"""
                    f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents], dim=0)
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
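    # (Added note, standard DDPM forward-noising identity.) scheduler.add_noise implements
    #   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise
    # so the encoded init image is pushed to exactly the timestep where get_timesteps told the
    # denoising loop to start.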
    def enable_sequential_cpu_offload(self, gpu_id=0):
        '''simple docstring'''
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"""cuda:{gpu_id}""")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        '''simple docstring'''
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"""cuda:{gpu_id}""")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        '''simple docstring'''
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, strength: float = 0.3, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, ):
        '''simple docstring'''
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"""Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)
        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image) | 46 |
"""simple docstring"""
from __future__ import annotations
from random import random
class Node:
    def __init__(self, value: int | None = None):
        '''simple docstring'''
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self: Tuple ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} ,indent=1 )
    def __str__(self):
        '''simple docstring'''
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    '''simple docstring'''
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right
def merge(left: Node | None, right: Node | None) -> Node | None:
    '''simple docstring'''
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
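# Worked example (added): after insert-ing 1, 3 and 5, split(root, 3) returns a left treap
# holding {1, 3} (keys <= 3 stay left) and a right treap holding {5}. merge(left, right) can
# then reassemble them because every key on the left is <= every key on the right, and the
# random `prior` fields keep the merged tree heap-ordered -- the treap invariant.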
def insert(root: Node | None, value: int) -> Node | None:
    '''simple docstring'''
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root: Node | None, value: int) -> Node | None:
    '''simple docstring'''
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
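# (Added note.) erase works by two splits: split(root, value - 1) keeps keys <= value - 1 on
# the left; splitting the remainder at `value` isolates every node equal to `value` in the
# middle, which is simply dropped before the left and right parts are merged back together.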
def inorder(root: Node | None) -> None:
    '''simple docstring'''
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)
def interact_treap(root: Node | None, args: str) -> Node | None:
    '''simple docstring'''
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root
def main() -> None:
    '''simple docstring'''
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. " )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("goodbye!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 46 | 1 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs, ) -> int:
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from AlignConfig
        if config_dict.get('''model_type''') == "align":
            config_dict = config_dict['''text_config''']
        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"
    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, drop_connect_rate: float = 0.2, **kwargs, ) -> Tuple:
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get('''model_type''') == "align":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True
    def __init__(self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0, initializer_range=0.02, **kwargs, ) -> Optional[int]:
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''')
        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''')
        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range
    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs) -> Optional[int]:
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self) -> int:
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
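# Minimal usage sketch (added; assumed API mirroring other composite configs in the library):
#
#   text_config = AlignTextConfig()        # BERT-style text tower with the defaults above
#   vision_config = AlignVisionConfig()    # EfficientNet-style vision tower
#   config = AlignConfig.from_text_vision_configs(text_config, vision_config)
#   assert config.to_dict()["model_type"] == "align"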
| 713 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"
    def __init__(self, attention_window: Union[List[int], int] = 512, sep_token_id: int = 2, pad_token_id: int = 1, bos_token_id: int = 0, eos_token_id: int = 2, vocab_size: int = 30522, hidden_size: int = 768, num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3072, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, max_position_embeddings: int = 512, type_vocab_size: int = 2, initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, onnx_export: bool = False, **kwargs, ) -> Dict:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None) -> Any:
        super().__init__(config, task, patching_specs)
        config.onnx_export = True
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''global_attention_mask''', dynamic_axis),
            ] )
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: '''batch'''}
        return outputs
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)
    def generate_dummy_inputs(self, preprocessor: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        import torch
        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
        return inputs
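# Minimal export sketch (added; usage is illustrative and assumes a standard tokenizer checkpoint):
#
#   from transformers import LongformerTokenizer
#   tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#   onnx_config = LongformerOnnxConfig(LongformerConfig(), task="default")
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#   # dummy["global_attention_mask"] marks every second token as global, as coded above.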
| 130 | 0 |
'''simple docstring'''
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
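# Worked trace (added): odd_even_transposition([3, 2, 1]) with arr_size=3 runs 3 passes,
# alternating the starting index between even and odd positions:
#   pass 0 (even): compare (0,1) -> [2, 3, 1]
#   pass 1 (odd) : compare (1,2) -> [2, 1, 3]
#   pass 2 (even): compare (0,1) -> [1, 2, 3]
# Like bubble sort it is O(n^2) sequentially, but each pass's comparisons are independent,
# which is why the algorithm parallelizes well.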
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
print(f"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 128 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=1, new_decoder_architecture=True, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def A ( self : List[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple , ):
_A : str = True
_A : int = FalconModel(SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_A : Tuple = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , )
_A : int = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , )
_A : Optional[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict , ):
_A : Any = FalconForCausalLM(config=SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_A : Dict = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] , ):
_A : int = True
_A : str = True
_A : int = FalconForCausalLM(config=SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
# first forward pass
_A : Dict = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE , )
_A : Optional[int] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_A : Any = ids_tensor((self.batch_size, 3) , config.vocab_size)
_A : int = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
_A : List[str] = torch.cat([input_ids, next_tokens] , dim=-1)
_A : int = torch.cat([input_mask, next_mask] , dim=-1)
_A : str = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['hidden_states'][0]
_A : Dict = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['hidden_states'][0]
# select random slice
_A : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1]).item()
_A : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
_A : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
def A ( self : Tuple):
_A , *_A : Tuple = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
_A : List[Any] = alibi
self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE)
def A ( self : Union[str, Any]):
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
_A : List[str] = 3
_A : Optional[int] = input_dict['input_ids']
_A : Any = input_ids.ne(1).to(SCREAMING_SNAKE_CASE)
_A : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
_A : List[str] = FalconForSequenceClassification(SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_A : str = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def A ( self : str):
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Tuple = 3
_A : int = 'single_label_classification'
_A : Tuple = input_dict['input_ids']
_A : Optional[int] = input_ids.ne(1).to(SCREAMING_SNAKE_CASE)
_A : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
_A : Dict = FalconForSequenceClassification(SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_A : Any = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def A ( self : Tuple):
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
_A : Optional[int] = input_dict['input_ids']
_A : Union[str, Any] = FalconForCausalLM(SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_A : Tuple = model(SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE)
_A : List[str] = input_ids.shape[0]
_A : Optional[Any] = model._convert_to_rw_cache(result.past_key_values)
_A : List[str] = model._convert_cache_to_standard_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
for layer in range(len(SCREAMING_SNAKE_CASE)):
for tensor_idx in range(2):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx]))
def A ( self : int):
_A , _A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_A : Optional[int] = 3
_A : Optional[Any] = 'multi_label_classification'
_A : List[str] = input_dict['input_ids']
_A : Optional[int] = input_ids.ne(1).to(SCREAMING_SNAKE_CASE)
_A : Dict = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
_A : Union[str, Any] = FalconForSequenceClassification(SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_A : Tuple = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def A ( self : str):
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
_A , _A : int = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(SCREAMING_SNAKE_CASE , 'use_cache'):
return
_A : Tuple = model_class(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE)
if "use_cache" not in inputs:
_A : List[str] = True
_A : Any = model(**SCREAMING_SNAKE_CASE)
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
_A : List[str] = (
getattr(SCREAMING_SNAKE_CASE , 'decoder_layers' , SCREAMING_SNAKE_CASE)
or getattr(SCREAMING_SNAKE_CASE , 'num_decoder_layers' , SCREAMING_SNAKE_CASE)
or config.num_hidden_layers
)
_A : List[str] = getattr(SCREAMING_SNAKE_CASE , 'num_kv_heads' , config.num_attention_heads)
_A : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE , 'd_model' , config.hidden_size)
_A : Optional[int] = embed_dim // num_attention_heads
_A : Union[str, Any] = outputs['past_key_values']
self.assertEqual(len(SCREAMING_SNAKE_CASE) , SCREAMING_SNAKE_CASE)
_A , _A : Union[str, Any] = inputs['input_ids'].shape
for i in range(SCREAMING_SNAKE_CASE):
if config.new_decoder_architecture:
_A : Optional[Any] = config.num_attention_heads
elif config.multi_query:
_A : str = 1
self.assertEqual(len(past_kv[0]) , 2) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
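    # (Explanatory note, added; not part of the original test.) With grouped-query /
    # multi-query attention the cached key/value tensors can carry fewer heads than the query
    # projection: Falcon with multi_query=True caches (batch, 1, seq_len, head_dim) per layer,
    # while the "new decoder architecture" caches (batch, num_attention_heads, seq_len,
    # head_dim) -- the two branches above pick the head count the shape assertions expect.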
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    """simple docstring"""
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b')
        model = FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b')
        model.eval()
        model.to(torch_device)
        inputs = tokenizer('My favorite food is', return_tensors='pt').to(torch_device)
        EXPECTED_OUTPUT = (
            'My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'
        )
        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]
        self.assertEqual(output_str, EXPECTED_OUTPUT)
    @slow
    def test_lm_generation_big_models(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer('My favorite food is', return_tensors='pt').to(torch_device)
            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)
    @slow
    def test_lm_generation_use_cache(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer('My favorite food is', return_tensors='pt').to(torch_device)
                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
| 128 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    """simple docstring"""
    def setUp(self) -> Optional[Any]:
        """simple docstring"""
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"] )
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401
        self.test_metrics = test_metrics
    @require_cpu
    def test_metric_cpu_noop(self) -> List[Any]:
        """simple docstring"""
        debug_launcher(self.test_metrics.main, num_processes=1)
    @require_cpu
    def test_metric_cpu_multi(self) -> Union[str, Any]:
        """simple docstring"""
        debug_launcher(self.test_metrics.main)
    @require_single_gpu
    def test_metric_gpu(self) -> Dict:
        """simple docstring"""
        self.test_metrics.main()
    @require_multi_gpu
    def test_metric_gpu_multi(self) -> str:
        """simple docstring"""
        print(f"""Found {torch.cuda.device_count()} devices.""")
        cmd = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy()) | 298 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__a : str = logging.get_logger(__name__)
__a : Any = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class LongformerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1E-12,
        onnx_export: bool = False,
        **kwargs,
    ) -> None:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    """simple docstring"""

    def __init__(self , config: "PretrainedConfig" , task: str = "default" , patching_specs: "Optional[List[PatchingSpec]]" = None ) -> None:
        """simple docstring"""
        super().__init__(config , task , patching_specs )
        config.onnx_export = True

    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ] )

    @property
    def outputs(self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self ) -> float:
        """simple docstring"""
        return 1E-4

    @property
    def default_onnx_opset(self ) -> int:
        """simple docstring"""
        return max(super().default_onnx_opset , 14 )

    def generate_dummy_inputs(self , preprocessor: "PreTrainedTokenizerBase" , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """simple docstring"""
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"] )
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
return inputs | 298 | 1 |
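        # Note (added, illustrative): with the every-second-token pattern above, a dummy
        # sequence of length 6 gets the hypothetical global-attention row [1, 0, 1, 0, 1, 0].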
class EditDistance:
    '''simple docstring'''

    def __init__(self) -> None:
        """simple docstring"""
        self.word_a = ''
        self.word_b = ''
        self.dp = []

    def __min_dist_top_down_dp(self, m, n) -> int:
        """simple docstring"""
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word_a[m] == self.word_b[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert_cost = self.__min_dist_top_down_dp(m, n - 1)
                delete_cost = self.__min_dist_top_down_dp(m - 1, n)
                replace_cost = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert_cost, delete_cost, replace_cost)
            return self.dp[m][n]

    def min_dist_top_down(self, word_a, word_b) -> int:
        """simple docstring"""
        self.word_a = word_a
        self.word_b = word_b
        self.dp = [[-1 for _ in range(len(word_b))] for _ in range(len(word_a))]
        return self.__min_dist_top_down_dp(len(word_a) - 1, len(word_b) - 1)

    def min_dist_bottom_up(self, word_a, word_b) -> int:
        """simple docstring"""
        self.word_a = word_a
        self.word_b = word_b
        m = len(word_a)
        n = len(word_b)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word_a[i - 1] == word_b[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert_cost = self.dp[i][j - 1]
                    delete_cost = self.dp[i - 1][j]
                    replace_cost = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert_cost, delete_cost, replace_cost)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()
    print('****************** Testing Edit Distance DP Algorithm ******************')
    print()
    Sa = input('Enter the first string: ').strip()
    Sb = input('Enter the second string: ').strip()
    print()
    print(F'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sb)}''')
    print(F'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sb)}''')
    print()
    print('*************** End of Testing Edit Distance DP Algorithm ***************')
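# A minimal self-check sketch (hypothetical strings, not part of the original script):
# "kitten" -> "sitting" needs 3 edits (substitute k->s, substitute e->i, append g),
# and both DP formulations must agree on that count.
_demo_solver = EditDistance()
assert _demo_solver.min_dist_top_down('kitten', 'sitting') == 3
assert _demo_solver.min_dist_bottom_up('kitten', 'sitting') == 3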
| 253 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ) -> None:
        """simple docstring"""
        size = size if size is not None else {'shortest_edge': 18}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self) -> dict:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''

    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self) -> None:
        """simple docstring"""
        self.image_processor_tester = LevitImageProcessingTester(self )
@property
    def image_processor_dict(self) -> dict:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing, 'image_mean' ) )
        self.assertTrue(hasattr(image_processing, 'image_std' ) )
        self.assertTrue(hasattr(image_processing, 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing, 'do_resize' ) )
        self.assertTrue(hasattr(image_processing, 'do_center_crop' ) )
        self.assertTrue(hasattr(image_processing, 'size' ) )
    def test_image_processor_from_dict_with_kwargs(self) -> None:
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size, {'shortest_edge': 18} )
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
        self.assertEqual(image_processor.size, {'shortest_edge': 42} )
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84} )
    def test_batch_feature(self) -> None:
"""simple docstring"""
pass
    def test_call_pil(self) -> None:
        """simple docstring"""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
    def test_call_numpy(self) -> None:
        """simple docstring"""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
    def test_call_pytorch(self) -> None:
        """simple docstring"""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
| 253 | 1 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/mask2former-swin-small-coco-instance': (
'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = 'mask2former'
    backbones_supported = ['swin']
    attribute_map = {'hidden_size': 'hidden_dim'}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        if backbone_config is None:
            logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" )
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=False , out_features=["stage1", "stage2", "stage3", "stage4"] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop("""model_type""" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
                F'''Supported model types: {",".join(self.backbones_supported )}''' )
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs )

    @classmethod
    def from_backbone_config(cls , backbone_config: PretrainedConfig , **kwargs ) -> "Mask2FormerConfig":
        '''simple docstring'''
        return cls(
            backbone_config=backbone_config , **kwargs , )

    def to_dict(self ) -> Dict[str, any]:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output | 707 |
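# A minimal usage sketch (hedged: runnable only where `transformers` is importable,
# since the default backbone is instantiated via CONFIG_MAPPING["swin"]):
#   config = Mask2FormerConfig(num_queries=50)
#   assert config.num_queries == 50 and config.backbone_config.model_type == "swin"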
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
        if determinant == 0:
            raise ValueError("""This matrix has no inverse.""" )

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0] ) == 3
        and len(matrix[1] ) == 3
        and len(matrix[2] ) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
                + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
                + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
            )
            - (
                (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
                + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
                + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
            ) )
        if determinant == 0:
            raise ValueError("""This matrix has no inverse.""" )

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
            d(matrix[1][2] ) * d(matrix[2][1] )
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
            d(matrix[1][1] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
            d(matrix[0][2] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
            d(matrix[0][2] ) * d(matrix[1][1] )
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
            d(matrix[0][1] ) * d(matrix[1][0] )
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix )
        for i in range(3 ):
            for j in range(3 ):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix )
        for i in range(3 ):
            for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )

        # Calculate the inverse of the matrix
        return [[float(d(n) ) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" ) | 240 | 0 |
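# A minimal sketch of the 2x2 case (hypothetical values): for [[2, 5], [1, 3]] the
# determinant is 2 * 3 - 5 * 1 = 1, so the inverse is [[3, -5], [-1, 2]].
assert inverse_of_matrix([[2.0, 5.0], [1.0, 3.0]]) == [[3.0, -5.0], [-1.0, 2.0]]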
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]
def next_number(number: int) -> int:
    """simple docstring"""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00]
        number //= 10_00_00
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True
CHAINS[57] = False
def chain(number: int) -> bool:
    """simple docstring"""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 10_00_00_00:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number: int = 10_00_00_00 ) -> int:
    """simple docstring"""
    for i in range(1 , number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution() = }")
| 331 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """simple docstring"""
    sq = int(number**0.5 )
    return number == sq * sq
def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """simple docstring"""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top , bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 35 ) -> int:
    """simple docstring"""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0 )
    fraction_sum: tuple[int, int]
    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
    for num, den in unique_s:
        total += Fraction(num , den )
    return total.denominator + total.numerator
if __name__ == "__main__":
print(F"{solution() = }")
| 331 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
def __init__( self : Any , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Any=2 , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : List[Any]=False , lowerCAmelCase__ : Any=1_0 , lowerCAmelCase__ : List[Any]=3 , lowerCAmelCase__ : int=3_2 * 8 , lowerCAmelCase__ : Union[str, Any]=3_2 * 8 , lowerCAmelCase__ : int=4 , lowerCAmelCase__ : Any=6_4 , ) -> List[Any]:
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = is_training
UpperCAmelCase = use_auxiliary_loss
UpperCAmelCase = num_queries
UpperCAmelCase = num_channels
UpperCAmelCase = min_size
UpperCAmelCase = max_size
UpperCAmelCase = num_labels
UpperCAmelCase = hidden_dim
UpperCAmelCase = hidden_dim
def _UpperCamelCase ( self : Any ) -> int:
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
UpperCAmelCase__ )
UpperCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCAmelCase__ )
UpperCAmelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCAmelCase__ ) > 0.5
).float()
UpperCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=UpperCAmelCase__ ) > 0.5).long()
UpperCAmelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _UpperCamelCase ( self : Any ) -> Any:
UpperCAmelCase = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
UpperCAmelCase = self.num_queries
UpperCAmelCase = self.num_labels
UpperCAmelCase = [1, 1, 1, 1]
UpperCAmelCase = self.num_channels
UpperCAmelCase = 6_4
UpperCAmelCase = 1_2_8
UpperCAmelCase = self.hidden_dim
UpperCAmelCase = self.hidden_dim
UpperCAmelCase = self.hidden_dim
return config
def _UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def _UpperCamelCase ( self : Dict , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] ) -> Optional[Any]:
UpperCAmelCase = output.encoder_hidden_states
UpperCAmelCase = output.pixel_decoder_hidden_states
UpperCAmelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(UpperCAmelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCAmelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCAmelCase__ ) , config.decoder_layers )
def _UpperCamelCase ( self : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int=False ) -> Dict:
with torch.no_grad():
UpperCAmelCase = MaskaFormerModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
UpperCAmelCase = model(pixel_values=UpperCAmelCase__ , pixel_mask=UpperCAmelCase__ )
UpperCAmelCase = model(UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(UpperCAmelCase__ , UpperCAmelCase__ )
def _UpperCamelCase ( self : List[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[str] ) -> List[str]:
UpperCAmelCase = MaskaFormerForUniversalSegmentation(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
def comm_check_on_output(lowerCAmelCase__ : Union[str, Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCAmelCase = model(pixel_values=UpperCAmelCase__ , pixel_mask=UpperCAmelCase__ )
UpperCAmelCase = model(UpperCAmelCase__ )
comm_check_on_output(UpperCAmelCase__ )
UpperCAmelCase = model(
pixel_values=UpperCAmelCase__ , pixel_mask=UpperCAmelCase__ , mask_labels=UpperCAmelCase__ , class_labels=UpperCAmelCase__ )
comm_check_on_output(UpperCAmelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __magic_name__ ( lowercase__ , lowercase__ , unittest.TestCase ):
UpperCAmelCase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
UpperCAmelCase = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def _UpperCamelCase ( self : Union[str, Any] ) -> Any:
UpperCAmelCase = MaskaFormerModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ )
def _UpperCamelCase ( self : Optional[Any] ) -> List[str]:
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Optional[int] ) -> Any:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(UpperCAmelCase__ , **UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ )
def _UpperCamelCase ( self : List[Any] ) -> List[str]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*UpperCAmelCase__ )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def _UpperCamelCase ( self : Dict ) -> Optional[Any]:
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def _UpperCamelCase ( self : List[Any] ) -> Dict:
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def _UpperCamelCase ( self : List[str] ) -> List[Any]:
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def _UpperCamelCase ( self : Tuple ) -> List[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`" )
def _UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _UpperCamelCase ( self : int ) -> Tuple:
pass
def _UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(UpperCAmelCase__ )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
@slow
def _UpperCamelCase ( self : Dict ) -> Any:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
UpperCAmelCase = MaskaFormerModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def _UpperCamelCase ( self : Tuple ) -> Optional[int]:
UpperCAmelCase = (self.model_tester.min_size,) * 2
UpperCAmelCase = {
'''pixel_values''': torch.randn((2, 3, *size) , device=UpperCAmelCase__ ),
'''mask_labels''': torch.randn((2, 1_0, *size) , device=UpperCAmelCase__ ),
'''class_labels''': torch.zeros(2 , 1_0 , device=UpperCAmelCase__ ).long(),
}
UpperCAmelCase = self.model_tester.get_config()
UpperCAmelCase = MaskaFormerForUniversalSegmentation(UpperCAmelCase__ ).to(UpperCAmelCase__ )
UpperCAmelCase = model(**UpperCAmelCase__ )
self.assertTrue(outputs.loss is not None )
def _UpperCamelCase ( self : List[str] ) -> Optional[int]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(UpperCAmelCase__ , **UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ )
def _UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(UpperCAmelCase__ ).to(UpperCAmelCase__ )
UpperCAmelCase = model(**UpperCAmelCase__ , output_attentions=UpperCAmelCase__ )
self.assertTrue(outputs.attentions is not None )
def _UpperCamelCase ( self : int ) -> str:
if not self.model_tester.is_training:
return
UpperCAmelCase = self.all_model_classes[1]
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.train()
UpperCAmelCase = model(UpperCAmelCase__ , mask_labels=UpperCAmelCase__ , class_labels=UpperCAmelCase__ ).loss
loss.backward()
def _UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase = self.all_model_classes[1]
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = model_class(UpperCAmelCase__ ).to(UpperCAmelCase__ )
model.train()
UpperCAmelCase = model(UpperCAmelCase__ , mask_labels=UpperCAmelCase__ , class_labels=UpperCAmelCase__ )
UpperCAmelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCAmelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
UpperCAmelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCAmelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCAmelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4
def prepare_img():
UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase ):
@cached_property
def _UpperCamelCase ( self : Dict ) -> Optional[Any]:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _UpperCamelCase ( self : Union[str, Any] ) -> Any:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _UpperCamelCase ( self : List[Any] ) -> List[Any]:
UpperCAmelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(UpperCAmelCase__ )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(UpperCAmelCase__ , return_tensors="pt" ).to(UpperCAmelCase__ )
UpperCAmelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(UpperCAmelCase__ , (1, 3, 3_8_4, 3_8_4) )
with torch.no_grad():
UpperCAmelCase = model(**UpperCAmelCase__ )
UpperCAmelCase = torch.tensor(
[[-0.2_790, -1.0_717, -1.1_668], [-0.5_128, -0.3_128, -0.4_987], [-0.5_832, 0.1_971, -0.0_197]] ).to(UpperCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) )
UpperCAmelCase = torch.tensor(
[[0.8_973, 1.1_847, 1.1_776], [1.1_934, 1.5_040, 1.5_128], [1.1_153, 1.4_486, 1.4_951]] ).to(UpperCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) )
UpperCAmelCase = torch.tensor(
[[2.1_152, 1.7_000, -0.8_603], [1.5_808, 1.8_004, -0.9_353], [1.6_043, 1.7_495, -0.5_999]] ).to(UpperCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) )
def _UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(UpperCAmelCase__ ).eval()
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(UpperCAmelCase__ , return_tensors="pt" ).to(UpperCAmelCase__ )
UpperCAmelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(UpperCAmelCase__ , (1, 3, 3_8_4, 3_8_4) )
with torch.no_grad():
UpperCAmelCase = model(**UpperCAmelCase__ )
# masks_queries_logits
UpperCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
UpperCAmelCase = [
[-8.7_839, -9.0_056, -8.8_121],
[-7.4_104, -7.0_313, -6.5_401],
[-6.6_105, -6.3_427, -6.4_675],
]
UpperCAmelCase = torch.tensor(UpperCAmelCase__ ).to(UpperCAmelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) )
# class_queries_logits
UpperCAmelCase = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase = torch.tensor(
[
[1.8_324, -8.0_835, -4.1_922],
[0.8_450, -9.0_050, -3.6_053],
[0.3_045, -7.7_293, -3.0_275],
] ).to(UpperCAmelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) )
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
UpperCAmelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(UpperCAmelCase__ ).eval()
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors="pt" , )
UpperCAmelCase = inputs['''pixel_values'''].to(UpperCAmelCase__ )
UpperCAmelCase = [el.to(UpperCAmelCase__ ) for el in inputs['''mask_labels''']]
UpperCAmelCase = [el.to(UpperCAmelCase__ ) for el in inputs['''class_labels''']]
with torch.no_grad():
UpperCAmelCase = model(**UpperCAmelCase__ )
self.assertTrue(outputs.loss is not None )
| 721 |
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 1 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    """configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_nezha"""] = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 71 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
    assert len(dest_layers ) == len(layers_to_copy ), F'{len(dest_layers )} != {len(layers_to_copy )}'
dest_layers.load_state_dict(layers_to_copy.state_dict() )
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student: int, n_teacher: int) -> List[int]:
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                F'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'
                F' {n_student}' )
        return list(range(n_student ) )
def get_layers_to_supervise(n_student: int, n_teacher: int) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(F'Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}' )
    elif n_teacher == n_student:
        return list(range(n_teacher ) )
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    _msg = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher , str ):
        AutoTokenizer.from_pretrained(teacher ).save_pretrained(save_path )  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher ).eval()
    else:
        assert isinstance(teacher , PreTrainedModel ), F'teacher must be a model or string got type {type(teacher )}'
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
    except AttributeError:  # T5
        if hasattr(teacher.config , 'num_encoder_layers' ):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config , 'num_encoder_layers' ):
            init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
        else:
            init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs )

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs )
    student = AutoModelForSeq2SeqLM.from_config(student_cfg )
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict() , strict=False )
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e ) ), list(range(d ) )
        logger.info(
            F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'
            F' {save_path}' )
        student.save_pretrained(save_path )
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy = pick_layers_to_copy(e , teacher_e )
    if d_layers_to_copy is None:
        d_layers_to_copy = pick_layers_to_copy(d , teacher_d )

    try:
        if hasattr(
            teacher , 'prophetnet' ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , e_layers_to_copy )
            copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , d_layers_to_copy )
        else:
            copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , e_layers_to_copy )
            copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , d_layers_to_copy )
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block , student.encoder.block , e_layers_to_copy )
        copy_layers(teacher.decoder.block , student.decoder.block , d_layers_to_copy )
    logger.info(
        F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}' )
    student.config.init_metadata = {
        'teacher_type': teacher.config.model_type,
        'copied_encoder_layers': e_layers_to_copy,
        'copied_decoder_layers': d_layers_to_copy,
    }
    student.save_pretrained(save_path )
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
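# A small illustration (values read off the LAYERS_TO_COPY table above): distilling a
# 12-layer teacher into a 3-layer student keeps the first, middle and last layers.
assert pick_layers_to_copy(n_student=3, n_teacher=12) == [0, 6, 11]
assert pick_layers_to_copy(n_student=1, n_teacher=16) == [0]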
| 4 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 127 |
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """simple docstring"""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    """simple docstring"""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    """simple docstring"""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance , 1 , 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """simple docstring"""
    img = Image.new("""RGB""" , (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
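# A minimal sketch of the escape-time function (illustrative inputs): the origin never
# diverges, so its distance is 1.0 and it is drawn black, while a point far outside the
# set escapes immediately and is drawn white.
assert get_distance(0, 0, 50) == 1.0
assert get_black_and_white_rgb(get_distance(0, 0, 50)) == (0, 0, 0)
assert get_black_and_white_rgb(get_distance(4, 4, 50)) == (255, 255, 255)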
| 127 | 1 |
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self , **kwargs ) -> dict:
        config = {
            'num_train_timesteps': 1_1_0_0,
            'beta_start': 0.0_001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'noise_sampler_seed': 0,
        }
        config.update(**kwargs )
        return config
    def test_timesteps(self ) -> None:
        for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_betas(self ) -> None:
        for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )

    def test_schedules(self ) -> None:
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )

    def test_prediction_type(self ) -> None:
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_full_loop_no_noise(self ) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47_821_044_921_875 ) < 1e-2
            assert abs(result_mean.item() - 0.2_178_705_964_565_277 ) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_352_111_816_406 ) < 1e-2
            assert abs(result_mean.item() - 0.22_342_906_892_299_652 ) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1e-2
            assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1e-3
    def test_full_loop_with_v_prediction(self ) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction' )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77_149_200_439_453 ) < 1e-2
            assert abs(result_mean.item() - 0.16_226_289_014_816_284 ) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1_663_360_595_703 ) < 1e-2
            assert abs(result_mean.item() - 0.16_688_326_001_167_297 ) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8_487_548_828_125 ) < 1e-2
            assert abs(result_mean.item() - 0.1_560_530_662_536_621 ) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
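
# --- Added example (not part of the original tests) ------------------------
# A minimal sketch of the denoising loop the tests above exercise, using the
# same public scheduler API. The zero-tensor "prediction" is an assumption
# standing in for a real UNet, and running this needs the `torchsde` extra
# (see the decorator above).
def _demo_sde_loop():
    scheduler = DPMSolverSDEScheduler(
        num_train_timesteps=1100, beta_schedule="linear", noise_sampler_seed=0
    )
    scheduler.set_timesteps(10)

    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for a UNet output
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample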
| 621 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np"
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np"
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=scheduler, torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator,
            num_inference_steps=2, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
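
# --- Added example (not part of the original tests) ------------------------
# A sketch of running the same checkpoint outside the test harness; the local
# image paths are assumptions for illustration, and a CUDA GPU is assumed.
def _demo_inpaint():
    pipe = StableDiffusionInpaintPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
    ).to("cuda")

    init_image = Image.open("init_image.png").convert("RGB")  # assumed local file
    mask_image = Image.open("mask.png").convert("RGB")  # assumed local file
    result = pipe(
        prompt="Face of a yellow cat, high resolution, sitting on a park bench",
        image=init_image,
        mask_image=mask_image,
        generator=torch.manual_seed(0),
    ).images[0]
    result.save("inpainted.png")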
| 621 | 1 |
def binary_and(a: int, b: int) -> str:
    """
    Return the bitwise AND of two non-negative integers as a binary string.

    >>> binary_and(25, 32)
    '0b000000'
    >>> binary_and(37, 50)
    '0b100000'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
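
# --- Added example (not part of the original module) -----------------------
# Sanity check: the string result agrees with Python's built-in & operator
# once parsed back to an integer.
for _a, _b in [(25, 32), (37, 50), (0, 0)]:
    assert int(binary_and(_a, _b), 2) == (_a & _b)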
| 703 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
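
# --- Added example (not part of the original module) -----------------------
# A minimal plain-Python sketch of the mechanism `_LazyModule` builds on: a
# module-level `__getattr__` (PEP 562) that resolves a name to its submodule
# only on first access, so importing the package stays cheap. The `_demo_*`
# names below are illustrative assumptions, not the transformers API.
import importlib

_demo_import_structure = {"math": ["sqrt"]}  # maps submodule -> exported names

def __getattr__(name):  # called only for names not found the normal way
    for _submodule, _exported in _demo_import_structure.items():
        if name in _exported:
            return getattr(importlib.import_module(_submodule), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")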
| 337 | 0 |
'''simple docstring'''
from __future__ import annotations
class XORCipher:
    """XOR encryption/decryption with an integer key. The operation is
    symmetric: applying the same key twice restores the input."""

    def __init__(self, key: int = 0):
        # private default key, used when a method receives key=0
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, list)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False

        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 533 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    """Fold numeric indices into the preceding name, e.g. "layers.0" -> "layers_0"."""
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch parameter key to its Flax counterpart and reshape the tensor if needed."""
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert the PyTorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
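
# --- Added examples (not part of the original module) ----------------------
# `rename_key` folds numeric indices into the preceding name, and a 2-D Linear
# weight is transposed from PyTorch (out, in) to Flax (in, out) layout; the
# toy tensors below are assumptions for illustration.
assert rename_key("encoder.layers.0.weight") == "encoder.layers_0.weight"

_pt_weight = jnp.zeros((4, 8))  # PyTorch Linear layout: (out_features, in_features)
_key, _tensor = rename_key_and_reshape_tensor(
    ("dense", "weight"), _pt_weight, {("dense", "kernel"): jnp.zeros((8, 4))}
)
assert _key == ("dense", "kernel") and _tensor.shape == (8, 4)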
| 618 | 0 |
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS, r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
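
# --- Added example (not part of the original module) -----------------------
# The two activation helpers at the top of this file on a toy logit row:
# softmax rows sum to 1, and sigmoid squashes every entry into (0, 1).
_logits = np.array([[2.0, 1.0, 0.1]])
assert np.allclose(softmax(_logits).sum(axis=-1), 1.0)
assert ((sigmoid(_logits) > 0) & (sigmoid(_logits) < 1)).all()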
| 705 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
        expected_encoding = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
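
# --- Added example (not part of the original tests) ------------------------
# A minimal sketch of the encode/decode round trip the tests above rely on;
# loading the integration checkpoint needs network access.
def _demo_roundtrip():
    tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
    ids = tokenizer.encode("this is a test")
    return tokenizer.decode(ids, skip_special_tokens=True)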
| 330 | 0 |
'''simple docstring'''
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return every k-element combination of [1..n], in lexicographic order."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
UpperCAmelCase_ : Dict = 4
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : str = generate_all_combinations(n, k)
print_all_state(total_list)
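
    # --- Added example (not part of the original module) -------------------
    # Worked check: choosing k = 2 out of n = 4 yields C(4, 2) = 6
    # combinations, produced in lexicographic order.
    assert generate_all_combinations(4, 2) == [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]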
| 24 |
'''simple docstring'''
def least_divisible_repunit(divisor: int) -> int:
    """Return the least k such that the repunit R(k) = 111...1 (k ones) is
    divisible by `divisor`, or 0 if no repunit can be (i.e. `divisor` shares
    a factor with 10)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """Find the least odd divisor (coprime to 10) whose least divisible
    repunit has more than `limit` digits."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
| 24 | 1 |
'''simple docstring'''
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors of a positive integer.
    (Function name chosen here for readability.)"""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
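
    # --- Added example (not part of the original module) -------------------
    # Worked check: 28 is a perfect number, its proper divisors
    # 1 + 2 + 4 + 7 + 14 sum back to 28.
    assert sum_of_divisors(28) == 28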
| 711 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_graphormer''': ['''GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GraphormerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
'''GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GraphormerForGraphClassification''',
'''GraphormerModel''',
'''GraphormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 420 | 0 |
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '\\n@misc{wu2016googles,\n    title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n    author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n    and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n    Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n    Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n    Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n    and Jeffrey Dean},\n    year={2016},\n    eprint={1609.08144},\n    archivePrefix={arXiv},\n    primaryClass={cs.CL}\n}\n'
_DESCRIPTION = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
UpperCAmelCase_ : Tuple = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        references: List[List[List[str]]],
        predictions: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 44 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 570 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4,
                 scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range)

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip('Onnx compliancy broke with TF 2.10')
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    input_ids = tf.convert_to_tensor([[1_0_1,1_0_1_9,1_0_1_4,1_0_1_6,1_0_3_7,1_2_8_4_9,4_7_4_7,1_0_0_4,1_4_2_4_6,2_2_7_8,5_4_3_9,4_5_2_4,5_0_0_2,2_9_3_0,2_1_9_3,2_9_3_0,4_3_4_1,3_2_0_8,1_0_0_5,1_0_5_5,2_1_7_1,2_8_4_8,1_1_3_0_0,3_5_3_1,1_0_2],[1_0_1,4_0_7_0,4_0_3_4,7_0_2_0,1_0_2_4,3_0_5_8,1_0_1_5,1_0_1_3,2_8_6_1,1_0_1_3,6_0_7_0,1_9_2_7_4,2_7_7_2,6_2_0_5,2_7_8_1_4,1_6_1_4_7,1_6_1_4_7,4_3_4_3,2_0_4_7,1_0_2_8_3,1_0_9_6_9,1_4_3_8_9,1_0_1_2,2_3_3_8,1_0_2]] )  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] )  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[4_2_3,2_3_7,4_4_0,2_5_1],[4_2_7,2_7_2,4_4_1,2_8_7],[4_1_9,1_1_5,4_3_7,1_2_9],[9_6_1,8_8_5,9_9_2,9_1_2],[2_5_6,3_8,3_3_0,5_8],[2_5_6,3_8,3_3_0,5_8],[3_3_6,4_2,3_5_3,5_7],[3_6_0,3_9,4_0_1,5_6],[3_6_0,3_9,4_0_1,5_6],[4_1_1,3_9,4_7_1,5_9],[4_7_9,4_1,5_2_8,5_9],[5_3_3,3_9,6_3_0,6_0],[6_7,1_1_3,1_3_4,1_3_1],[1_4_1,1_1_5,2_0_9,1_3_2],[6_8,1_4_9,1_3_3,1_6_6],[1_4_1,1_4_9,1_8_7,1_6_4],[1_9_5,1_4_8,2_8_7,1_6_5],[1_9_5,1_4_8,2_8_7,1_6_5],[1_9_5,1_4_8,2_8_7,1_6_5],[2_9_5,1_4_8,3_4_9,1_6_5],[4_4_1,1_4_9,4_9_2,1_6_6],[4_9_7,1_4_9,5_4_6,1_6_4],[6_4,2_0_1,1_2_5,2_1_8],[1_0_0_0,1_0_0_0,1_0_0_0,1_0_0_0]],[[0,0,0,0],[6_6_2,1_5_0,7_5_4,1_6_6],[6_6_5,1_9_9,7_4_2,2_1_1],[5_1_9,2_1_3,5_5_4,2_2_8],[5_1_9,2_1_3,5_5_4,2_2_8],[1_3_4,4_3_3,1_8_7,4_5_4],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[3_1_4,4_6_9,3_7_6,4_8_2],[5_0_4,6_8_4,5_8_2,7_0_6],[9_4_1,8_2_5,9_7_3,9_0_0],[9_4_1,8_2_5,9_7_3,9_0_0],[9_4_1,8_2_5,9_7_3,9_0_0],[9_4_1,8_2_5,9_7_3,9_0_0],[6_1_0,7_4_9,6_5_2,7_6_5],[1_3_0,6_5_9,1_6_8,6_7_2],[1_7_6,6_5_7,2_3_7,6_7_2],[2_3_8,6_5_7,3_1_2,6_7_2],[4_4_3,6_5_3,6_2_8,6_7_2],[4_4_3,6_5_3,6_2_8,6_7_2],[7_1_6,3_0_1,8_2_5,3_1_7],[1_0_0_0,1_0_0_0,1_0_0_0,1_0_0_0]]] )  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] )  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-1_0_0,1_0,1_0,1_0,9,1,-1_0_0,7,7,-1_0_0,7,7,4,2,5,2,8,8,-1_0_0,-1_0_0,5,0,3,2,-1_0_0],[-1_0_0,1_2,1_2,1_2,-1_0_0,1_2,1_0,-1_0_0,-1_0_0,-1_0_0,-1_0_0,1_0,1_2,9,-1_0_0,-1_0_0,-1_0_0,1_0,1_0,1_0,9,1_2,-1_0_0,1_0,-1_0_0]] )  # noqa: E231
    # fmt: on

    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased')
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))
        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased', num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=tf.convert_to_tensor([1, 1]),
        )
        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased', num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased')
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
| 704 |
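LayoutLM's `bbox` input expects word bounding boxes on a 0-1000 grid, which is why the tester above draws coordinates from `range_bbox=1000` and sorts each box's corners. A small normalization helper, in the style of the LayoutLM documentation (the page size here is illustrative):

def normalize_bbox(bbox, width, height):
    # (x0, y0, x1, y1) in pixels -> integers on LayoutLM's 0-1000 grid
    return [
        int(1000 * bbox[0] / width),
        int(1000 * bbox[1] / height),
        int(1000 * bbox[2] / width),
        int(1000 * bbox[3] / height),
    ]

# e.g. a word box on a 500x400 page
print(normalize_bbox((10, 10, 60, 30), width=500, height=400))  # [20, 25, 120, 75]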
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, return_dict: bool = True) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
| 657 | 0 |
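A minimal sketch of how the scheduler class above is driven in a denoising loop; the model output here is a zero placeholder standing in for a real diffusion network:

import torch

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=50)

sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # placeholder for model(sample, t)
    sample = scheduler.step(model_output, t, sample).prev_sample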
"""simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : Any ):
lowerCAmelCase = k_size // 2
lowerCAmelCase ,lowerCAmelCase = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
lowerCAmelCase = 1 / (2 * pi * sigma) * exp(-(square(_UpperCAmelCase ) + square(_UpperCAmelCase )) / (2 * square(_UpperCAmelCase )) )
return g
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] ):
lowerCAmelCase ,lowerCAmelCase = image.shape[0], image.shape[1]
# dst image height and width
lowerCAmelCase = height - k_size + 1
lowerCAmelCase = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
lowerCAmelCase = zeros((dst_height * dst_width, k_size * k_size) )
lowerCAmelCase = 0
for i, j in product(range(_UpperCAmelCase ) , range(_UpperCAmelCase ) ):
lowerCAmelCase = ravel(image[i : i + k_size, j : j + k_size] )
lowerCAmelCase = window
row += 1
# turn the kernel into shape(k*k, 1)
lowerCAmelCase = gen_gaussian_kernel(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = ravel(_UpperCAmelCase )
# reshape and get the dst image
lowerCAmelCase = dot(_UpperCAmelCase , _UpperCAmelCase ).reshape(_UpperCAmelCase , _UpperCAmelCase ).astype(_UpperCAmelCase )
return dst
if __name__ == "__main__":
# read original image
__UpperCamelCase : Tuple = imread(R'''../image_data/lena.jpg''')
# turn image in gray scale value
__UpperCamelCase : int = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
__UpperCamelCase : Optional[Any] = gaussian_filter(gray, 3, sigma=1)
__UpperCamelCase : Tuple = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('''gaussian filter with 3x3 mask''', gaussianaxa)
imshow('''gaussian filter with 5x5 mask''', gaussianaxa)
waitKey()
| 4 |
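Because the im2col step multiplies each k-by-k window with the flattened kernel, the result is a "valid"-mode 2-D correlation, and with a symmetric Gaussian kernel that matches convolution. A quick sanity check against scipy, under the assumption scipy is available (array sizes are arbitrary):

import numpy as np
from scipy.signal import convolve2d

image = np.random.randint(0, 256, size=(16, 16)).astype(float)
kernel = gen_gaussian_kernel(3, 1)

expected = convolve2d(image, kernel, mode="valid")  # symmetric kernel: correlation == convolution
result = gaussian_filter(image, 3, sigma=1)         # truncated to uint8 inside

assert result.shape == expected.shape == (14, 14)
assert np.max(np.abs(result.astype(float) - expected)) <= 1.0  # only truncation error remains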
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
        default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 245 | 0 |
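The qkv handling in the conversion above relies on fused attention weights being stacked query-first along dim 0. Splitting a dummy tensor shows the slicing (the hidden size is illustrative):

import torch

dim = 4  # hidden size
qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)

q = qkv_weight[:dim, :]           # rows 0 .. dim-1
k = qkv_weight[dim : dim * 2, :]  # rows dim .. 2*dim-1
v = qkv_weight[-dim:, :]          # same as [2*dim:, :]

assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)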
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _A ( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ViTImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("""feature_extractor""")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""")
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("""You have to specify either text, visual prompt or images.""")
        if text is not None and visual_prompt is not None:
            raise ValueError("""You have to specify exactly one type of prompt. Either text or visual prompt.""")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                """pixel_values""": image_features.pixel_values,
                """conditional_pixel_values""": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                """conditional_pixel_values""": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""",
            FutureWarning,
        )
        return self.image_processor
| 315 |
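A usage sketch for the processor class above (kept under its placeholder name `_A`). The tokenizer and image-processor instances, and the file names, are assumptions for illustration:

from PIL import Image

processor = _A(image_processor=my_image_processor, tokenizer=my_tokenizer)  # hypothetical instances

image = Image.open("scene.jpg")
inputs = processor(text=["a glass", "a cup"], images=[image, image], return_tensors="pt")
# -> input_ids, attention_mask, pixel_values

prompt = Image.open("cup_crop.jpg")
inputs = processor(images=[image], visual_prompt=[prompt], return_tensors="pt")
# -> pixel_values, conditional_pixel_values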
def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 315 | 1 |
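This is Kahn's algorithm: zero-indegree vertices are peeled off in topological order while edge relaxations grow `long_dist[v]`, the number of vertices on the longest path ending at v. Swapping the `print` for a `return` makes it testable; a sketch:

def longest_path_length(graph):
    indegree = [0] * len(graph)
    long_dist = [1] * len(graph)
    for targets in graph.values():
        for t in targets:
            indegree[t] += 1
    queue = [v for v in range(len(graph)) if indegree[v] == 0]
    while queue:
        v = queue.pop(0)
        for x in graph[v]:
            indegree[x] -= 1
            long_dist[x] = max(long_dist[x], long_dist[v] + 1)
            if indegree[x] == 0:
                queue.append(x)
    return max(long_dist)

# longest chain in the sample graph is 0 -> 2 -> 5 -> 6 -> 7 (5 vertices)
assert longest_path_length({0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}) == 5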
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
    },
    'merges_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'allegro/herbert-base-cased': 514}

PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token="<s>",
                 unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sep_token="</s>", **kwargs):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 648 |
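The three sequence-pair methods above encode HerBERT's layout: for sequences A and B the result is `<s> A </s> B </s>`, with token type ids 0 over the first segment and 1 over the second. A quick illustration with toy ids standing in for real vocabulary indices:

cls_id, sep_id = 0, 2
a, b = [11, 12], [21]

input_ids = [cls_id] + a + [sep_id] + b + [sep_id]       # <s> A </s> B </s>
type_ids  = [0] * (1 + len(a) + 1) + [1] * (len(b) + 1)  # first segment 0s, second segment 1s
special   = [1] + [0] * len(a) + [1] + [0] * len(b) + [1]

assert len(input_ids) == len(type_ids) == len(special) == 6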
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, relative_attention=False,
                 position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaVaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, )
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': DebertaVaModel,
'fill-mask': DebertaVaForMaskedLM,
'question-answering': DebertaVaForQuestionAnswering,
'text-classification': DebertaVaForSequenceClassification,
'token-classification': DebertaVaForTokenClassification,
'zero-shot': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="""Model not available yet""")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""")

        input_ids = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f'{output[:, 1:4, 1:4]}')
| 648 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''',
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(self, vocab_size=42384, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16,
                 intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, scale_embedding=True,
                 use_cache=True, layerdrop=0.0, activation_dropout=0.0, pad_token_id=1, bos_token_id=0,
                 eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
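A quick usage sketch for the config class above: instantiating with defaults and overriding one field (the small vocab is purely illustrative):

config = BioGptConfig(vocab_size=1000)
print(config.model_type)   # "biogpt"
print(config.hidden_size)  # 1024
print(config.vocab_size)   # 1000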
| 469 | from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time, burst_time, no_of_processes):
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time, no_of_processes, waiting_time):
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time, turn_around_time, no_of_processes):
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print('''Average turn around time =''', total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print('''Enter how many process you want to analyze''')
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print('''Enter the arrival time and burst time for process:--''' + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            '''Process''',
            '''BurstTime''',
            '''ArrivalTime''',
            '''WaitingTime''',
            '''TurnAroundTime''',
        ],
    )

    # Printing the dataFrame
    pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
    print(fcfs)
| 469 | 1 |
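For a non-interactive check of the shortest-remaining-time-first scheduler above, the functions can be driven with fixed inputs. With these numbers (hand-checked), process 1 is preempted twice in favor of shorter arrivals:

arrival = [0, 1, 2]
burst = [8, 4, 2]
n = 3

wt = calculate_waitingtime(arrival, burst, n)
tat = calculate_turnaroundtime(burst, n, wt)
print(wt, tat)  # [6, 2, 0] [14, 6, 2]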
from __future__ import annotations
import numpy as np
def relu(vector: list[float]):
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
| 63 |
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
| 597 | 0 |
"""simple docstring"""
import numpy as np
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
return vector * sigmoid(_snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
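ReLU and SiLU (defined in the two snippets above) differ mainly around zero: ReLU hard-clips negatives while SiLU passes small negative values through smoothly. A quick comparison:

import numpy as np

x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
relu_out = np.maximum(0, x)
silu_out = x / (1 + np.exp(-x))  # equivalent to x * sigmoid(x)

print(relu_out)                # [0.  0.  0.  0.5 2. ]
print(np.round(silu_out, 3))   # [-0.238 -0.189  0.     0.311  1.762]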
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def _lowerCamelCase ( _UpperCamelCase = "dhaka" , _UpperCamelCase = 5 ):
'''simple docstring'''
__lowerCAmelCase = min(_UpperCamelCase , 50 ) # Prevent abuse!
__lowerCAmelCase = {
"q": query,
"tbm": "isch",
"hl": "en",
"ijn": "0",
}
__lowerCAmelCase = requests.get("https://www.google.com/search" , params=_UpperCamelCase , headers=_UpperCamelCase )
__lowerCAmelCase = BeautifulSoup(html.text , "html.parser" )
__lowerCAmelCase = "".join(
re.findall(R"AF_initDataCallback\(([^<]+)\);" , str(soup.select("script" ) ) ) )
__lowerCAmelCase = json.dumps(_UpperCamelCase )
__lowerCAmelCase = json.loads(_UpperCamelCase )
__lowerCAmelCase = re.findall(
R"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\"," , _UpperCamelCase , )
if not matched_google_image_data:
return 0
__lowerCAmelCase = re.sub(
R"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]" , "" , str(_UpperCamelCase ) , )
__lowerCAmelCase = re.findall(
R"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]" , _UpperCamelCase , )
for index, fixed_full_res_image in enumerate(_UpperCamelCase ):
if index >= max_images:
return index
__lowerCAmelCase = bytes(_UpperCamelCase , "ascii" ).decode(
"unicode-escape" )
__lowerCAmelCase = bytes(_UpperCamelCase , "ascii" ).decode(
"unicode-escape" )
__lowerCAmelCase = urllib.request.build_opener()
__lowerCAmelCase = [
(
"User-Agent",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
)
]
urllib.request.install_opener(_UpperCamelCase )
__lowerCAmelCase = f"query_{query.replace(' ' , '_' )}"
if not os.path.exists(_UpperCamelCase ):
os.makedirs(_UpperCamelCase )
urllib.request.urlretrieve( # noqa: S310
_UpperCamelCase , f"{path_name}/original_size_img_{index}.jpg" )
return index
if __name__ == "__main__":
try:
A : Any = download_images_from_google_query(sys.argv[1])
print(f'''{image_count} images were downloaded to disk.''')
except IndexError:
print("Please provide a search term.")
raise
| 282 | 0 |