| code (string, 87–55.2k chars) | code_codestyle (int64, 0–349) | style_context (string, 135–49.1k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
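# `_import_structure` maps submodule names to their public symbols; `_LazyModule`
# (bound to this module at the bottom of the file) uses it to defer the heavy
# torch/TF imports until an attribute is first accessed.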
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["MobileViTFeatureExtractor"]
UpperCAmelCase__ = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
"MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileViTForImageClassification",
"MobileViTForSemanticSegmentation",
"MobileViTModel",
"MobileViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
"TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileViTForImageClassification",
"TFMobileViTForSemanticSegmentation",
"TFMobileViTModel",
"TFMobileViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0 |
def solution(min_total: int = 10**12) -> int:
    prev_numerator = 1
    prev_denominator = 0

    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2
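# Hand-checked example (not part of the original file): the first arrangement
# with more than 21 discs in total has 85 blue discs out of 120, and indeed
# solution(21) == 85.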
if __name__ == "__main__":
print(f"""{solution() = }""")
| 62 | 0 |
import argparse

import torch

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
    )
    parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
    parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()

    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
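# Example invocation (all paths below are illustrative placeholders, not taken
# from the original script):
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted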
| 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_mae'] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vit_mae'] = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
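# e.g. `from transformers.models.vit_mae import ViTMAEModel` resolves lazily
# through the structure above and only then imports the torch-backed code.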
| 62 | 0 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
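    # Why "react" becomes "re@@ a@@ c@@ t": the toy merge table above only learns
    # the merges needed for "adapt", so "react" falls back to near-character
    # pieces marked with the "@@" continuation suffix, while "readapt" can reuse
    # the learned "re@@" and "adapt" pieces.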
| 2 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_jukebox'] = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 0 |
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception('Node not found')

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None
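# Hypothetical usage sketch (not part of the original module); the doctest was
# verified by hand against the classes above:
def create_linked_list() -> None:
    """
    >>> new_list = LinkedList()
    >>> new_list.is_empty()
    True
    >>> new_list.insert(10)
    >>> new_list.insert(20)
    >>> new_list.get_head_data()
    10
    >>> new_list.get_tail_data()
    20
    >>> 10 in new_list
    True
    >>> new_list.delete_value(10)
    >>> new_list.get_head_data()
    20
    """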
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 3 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 4 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel('Gain (dB)')

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel('Phase shift (Radians)')
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
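# Minimal usage sketch (the Identity filter below is illustrative, not from the
# original module): an impulse through an all-pass filter gives a flat 0 dB
# frequency response.
#   class Identity:
#       def process(self, sample: float) -> float:
#           return sample
#
#   show_frequency_response(Identity(), samplerate=48000)
#   show_phase_response(Identity(), samplerate=48000)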
| 62 | 0 |
def solution(min_total: int = 10**12) -> int:
    prev_numerator = 1
    prev_denominator = 0

    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2
if __name__ == "__main__":
print(f'''{solution() = }''')
| 5 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_sew'] = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 0 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
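    # The tests below sweep one config field at a time through
    # `self.check_over_configs(...)`; in diffusers' SchedulerCommonTest this
    # rebuilds the scheduler with that override and checks that step() output
    # survives a save/load round trip (a description, not code from this file).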
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
 | 6 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated', False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
                'The config can simply be saved and uploaded again to be fixed.' )
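# Hypothetical construction sketch (not from the original file), using the
# defaults above; `hidden_size` resolves to `d_model` via `attribute_map`:
#   config = MvpConfig(encoder_layers=6, decoder_layers=6)
#   assert config.hidden_size == config.d_model == 1024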
| 62 | 0 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a
    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
def get_context_and_ans(example, assertion=False):
    answer = _get_single_answer(example)
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
A__ = example['document']['tokens']
A__ = []
for i in range(len(doc['token'] ) ):
if not doc["is_html"][i]:
context.append(doc['token'][i] )
return {
"context": " ".join(SCREAMING_SNAKE_CASE__ ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
A__ = ['start_token', 'end_token']
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
A__ = example['document']['tokens']
A__ = answer['start_token']
A__ = answer['end_token']
A__ = []
for i in range(len(doc['token'] ) ):
if not doc["is_html"][i]:
context.append(doc['token'][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
A__ = ' '.join(context[start_token:end_token] )
# checking above code
if assertion:
A__ = doc['is_html'][answer['start_token'] : answer['end_token']]
A__ = doc['token'][answer['start_token'] : answer['end_token']]
A__ = ' '.join([old[i] for i in range(len(SCREAMING_SNAKE_CASE__ ) ) if not is_html[i]] )
if new != old:
print('ID:' , example['id'] )
print('New:' , SCREAMING_SNAKE_CASE__ , end='\n' )
print('Old:' , SCREAMING_SNAKE_CASE__ , end='\n\n' )
return {
"context": " ".join(SCREAMING_SNAKE_CASE__ ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
A__ = tokenizer(example['question']['text'] , out['context'] ).input_ids
A__ = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
A__ = []
A__ = []
A__ = input_ids[:q_len]
A__ = range(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) , max_length - doc_stride )
for i in doc_start_indices:
A__ = i + max_length - q_len
A__ = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer['category'][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(SCREAMING_SNAKE_CASE__ ),
"end_token": [-100] * len(SCREAMING_SNAKE_CASE__ ),
"category": category,
},
}
A__ = out['context'].split()
A__ = splitted_context[answer['end_token']]
A__ = len(
tokenizer(
' '.join(splitted_context[: answer['start_token']] ) , add_special_tokens=SCREAMING_SNAKE_CASE__ , ).input_ids )
A__ = len(
tokenizer(' '.join(splitted_context[: answer['end_token']] ) , add_special_tokens=SCREAMING_SNAKE_CASE__ ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
A__ = len(tokenizer(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
A__ = input_ids[answer['start_token'] : answer['end_token'] + 1] # right & left are inclusive
A__ = answer['start_token']
A__ = answer['end_token']
if assertion:
A__ = tokenizer.decode(SCREAMING_SNAKE_CASE__ )
if answer["span"] != new:
print('ISSUE IN TOKENIZATION' )
print('OLD:' , answer['span'] )
print('NEW:' , SCREAMING_SNAKE_CASE__ , end='\n\n' )
if len(SCREAMING_SNAKE_CASE__ ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
A__ = input_ids[:q_len]
A__ = range(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) , max_length - doc_stride )
A__ = []
A__ = []
A__ = []
A__ = [] # null, yes, no, long, short
for i in doc_start_indices:
A__ = i + max_length - q_len
A__ = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
A__ = start_token - i + q_len
A__ = end_token - i + q_len
answers_category.append(answer['category'][0] ) # ["short"] -> "short"
else:
A__ = -100
A__ = -100
answers_category.append('null' )
A__ = inputs[-1][start_token : end_token + 1]
answers_start_token.append(SCREAMING_SNAKE_CASE__ )
answers_end_token.append(SCREAMING_SNAKE_CASE__ )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print('ISSUE in strided for ID:' , example['id'] )
print('New:' , tokenizer.decode(SCREAMING_SNAKE_CASE__ ) )
print('Old:' , tokenizer.decode(SCREAMING_SNAKE_CASE__ ) , end='\n\n' )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )

    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
for ids, start, end, cat in zip(
example['input_ids'] , labels['start_token'] , labels['end_token'] , labels['category'] , ):
if start == -1 and end == -1:
continue # leave waste samples with no answer
if cat == "null" and np.random.rand() < 0.6:
continue # removing 50 % samples
writer.write(
{
'input_ids': ids,
'start_token': start,
'end_token': end,
'category': CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
    from datasets import load_dataset

    from transformers import BigBirdTokenizer

    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
| 7 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
            '<|endoftext|>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
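    # '\u0120' (Ġ) is GPT-2's byte-level BPE marker for a leading space, which is
    # why add_prefix_space=True makes 'lower' tokenize to '\u0120low' + 'er'.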
def _a ( self ) -> int:
if not self.test_rust_tokenizer:
return
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =self.get_rust_tokenizer(add_prefix_space=A_ )
__UpperCamelCase ='lower newer'
# Testing tokenization
__UpperCamelCase =tokenizer.tokenize(A_ , add_prefix_space=A_ )
__UpperCamelCase =rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
# Testing conversion to ids without special tokens
__UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ , add_prefix_space=A_ )
__UpperCamelCase =rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
# Testing conversion to ids with special tokens
__UpperCamelCase =self.get_rust_tokenizer(add_prefix_space=A_ )
__UpperCamelCase =tokenizer.encode(A_ , add_prefix_space=A_ )
__UpperCamelCase =rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
# Testing the unknown token
__UpperCamelCase =tokens + [rust_tokenizer.unk_token]
__UpperCamelCase =[14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A_ ) , A_ )
def _a ( self , *A_ , **A_ ) -> Optional[int]:
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def _a ( self , A_=15 ) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
# Simple input
__UpperCamelCase ='This is a simple input'
__UpperCamelCase =['This is a simple input 1', 'This is a simple input 2']
__UpperCamelCase =('This is a simple input', 'This is a pair')
__UpperCamelCase =[
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='max_length' )
# Simple input
self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='max_length' )
# Simple input
self.assertRaises(
A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='max_length' , )
# Pair input
self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='max_length' )
# Pair input
self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='max_length' )
# Pair input
self.assertRaises(
A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='max_length' , )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token='<pad>')
# Simple input
__UpperCamelCase ='This is a simple input'
__UpperCamelCase =['This is a simple input looooooooong', 'This is a simple input']
__UpperCamelCase =('This is a simple input', 'This is a pair')
__UpperCamelCase =[
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
__UpperCamelCase =tokenizer.pad_token_id
__UpperCamelCase =tokenizer(A_ , padding='max_length' , max_length=30 , return_tensors='np' )
__UpperCamelCase =tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors='np' )
__UpperCamelCase =tokenizer(*A_ , padding='max_length' , max_length=60 , return_tensors='np' )
__UpperCamelCase =tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
    def test_add_bos_token_slow(self):
        bos_token = '$$$'
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)
__UpperCamelCase ='This is a simple input'
__UpperCamelCase =['This is a simple input 1', 'This is a simple input 2']
__UpperCamelCase =tokenizer.bos_token_id
__UpperCamelCase =tokenizer(A_ )
__UpperCamelCase =tokenizer(A_ )
self.assertEqual(out_s.input_ids[0] , A_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__UpperCamelCase =tokenizer.decode(out_s.input_ids )
__UpperCamelCase =tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , A_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def _a ( self ) -> Optional[int]:
pass
def _a ( self ) -> Any:
# TODO: change to self.get_tokenizers() when the fast version is implemented
__UpperCamelCase =[self.get_tokenizer(do_lower_case=A_ , add_bos_token=A_ )]
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__UpperCamelCase ='Encode this.'
__UpperCamelCase ='This one too please.'
__UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ )
encoded_sequence += tokenizer.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode_plus(
A_ , A_ , add_special_tokens=A_ , return_special_tokens_mask=A_ , )
__UpperCamelCase =encoded_sequence_dict['input_ids']
__UpperCamelCase =encoded_sequence_dict['special_tokens_mask']
self.assertEqual(len(A_ ) , len(A_ ) )
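                # Masking out every position flagged as a special token should
                # recover the plain concatenation of the two encoded sequences.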
__UpperCamelCase =[
(x if not special_tokens_mask[i] else None) for i, x in enumerate(A_ )
]
__UpperCamelCase =[x for x in filtered_sequence if x is not None]
self.assertEqual(A_ , A_ )
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Optional[Any]:
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
__UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=A_ )
__UpperCamelCase ='A photo of a cat'
__UpperCamelCase =tokenizer.encode(
A_ , )
self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('test_opt' )
__UpperCamelCase =AutoTokenizer.from_pretrained('./test_opt' )
__UpperCamelCase =tokenizer.encode(
A_ , )
self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] )
def _a ( self ) -> Dict:
__UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=A_ )
__UpperCamelCase ='A photo of a cat'
__UpperCamelCase =tokenizer.encode(
A_ , )
# Same as above
self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip('This test is failing because of a bug in the fast tokenizer' )
def _a ( self ) -> List[Any]:
__UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=A_ )
__UpperCamelCase ='bos'
__UpperCamelCase =tokenizer.get_vocab()['bos']
__UpperCamelCase ='A photo of a cat'
__UpperCamelCase =tokenizer.encode(
A_ , )
# We changed the bos token
self.assertEqual(A_ , [31957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('./tok' )
__UpperCamelCase =AutoTokenizer.from_pretrained('./tok' )
self.assertTrue(tokenizer.is_fast )
__UpperCamelCase =tokenizer.encode(
A_ , )
self.assertEqual(A_ , [31957, 250, 1345, 9, 10, 4758] )
| 62 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = DPTConfig()
if "large" in checkpoint_url:
snake_case_ = 1024
snake_case_ = 4096
snake_case_ = 24
snake_case_ = 16
snake_case_ = [5, 11, 17, 23]
snake_case_ = [256, 512, 1024, 1024]
snake_case_ = (1, 384, 384)
if "ade" in checkpoint_url:
snake_case_ = True
snake_case_ = 150
snake_case_ = '''huggingface/label-files'''
snake_case_ = '''ade20k-id2label.json'''
snake_case_ = json.load(open(cached_download(hf_hub_url(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' ) ) , '''r''' ) )
snake_case_ = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
snake_case_ = [1, 150, 480, 480]
return config, expected_shape
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
snake_case_ = name.replace('''pretrained.model''' , '''dpt.encoder''' )
if "pretrained.model" in name:
snake_case_ = name.replace('''pretrained.model''' , '''dpt.embeddings''' )
if "patch_embed" in name:
snake_case_ = name.replace('''patch_embed''' , '''patch_embeddings''' )
if "pos_embed" in name:
snake_case_ = name.replace('''pos_embed''' , '''position_embeddings''' )
if "attn.proj" in name:
snake_case_ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "proj" in name and "project" not in name:
snake_case_ = name.replace('''proj''' , '''projection''' )
if "blocks" in name:
snake_case_ = name.replace('''blocks''' , '''layer''' )
if "mlp.fc1" in name:
snake_case_ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
snake_case_ = name.replace('''mlp.fc2''' , '''output.dense''' )
if "norm1" in name:
snake_case_ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
snake_case_ = name.replace('''norm2''' , '''layernorm_after''' )
if "scratch.output_conv" in name:
snake_case_ = name.replace('''scratch.output_conv''' , '''head''' )
if "scratch" in name:
snake_case_ = name.replace('''scratch''' , '''neck''' )
if "layer1_rn" in name:
snake_case_ = name.replace('''layer1_rn''' , '''convs.0''' )
if "layer2_rn" in name:
snake_case_ = name.replace('''layer2_rn''' , '''convs.1''' )
if "layer3_rn" in name:
snake_case_ = name.replace('''layer3_rn''' , '''convs.2''' )
if "layer4_rn" in name:
snake_case_ = name.replace('''layer4_rn''' , '''convs.3''' )
if "refinenet" in name:
snake_case_ = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
snake_case_ = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
if "out_conv" in name:
snake_case_ = name.replace('''out_conv''' , '''projection''' )
if "resConfUnit1" in name:
snake_case_ = name.replace('''resConfUnit1''' , '''residual_layer1''' )
if "resConfUnit2" in name:
snake_case_ = name.replace('''resConfUnit2''' , '''residual_layer2''' )
if "conv1" in name:
snake_case_ = name.replace('''conv1''' , '''convolution1''' )
if "conv2" in name:
snake_case_ = name.replace('''conv2''' , '''convolution2''' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
snake_case_ = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
if "pretrained.act_postprocess2.0.project.0" in name:
snake_case_ = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
if "pretrained.act_postprocess3.0.project.0" in name:
snake_case_ = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
if "pretrained.act_postprocess4.0.project.0" in name:
snake_case_ = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
snake_case_ = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
if "pretrained.act_postprocess1.4" in name:
snake_case_ = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
if "pretrained.act_postprocess2.3" in name:
snake_case_ = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
if "pretrained.act_postprocess2.4" in name:
snake_case_ = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
if "pretrained.act_postprocess3.3" in name:
snake_case_ = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
if "pretrained.act_postprocess4.3" in name:
snake_case_ = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
if "pretrained.act_postprocess4.4" in name:
snake_case_ = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
if "pretrained" in name:
snake_case_ = name.replace('''pretrained''' , '''dpt''' )
if "bn" in name:
snake_case_ = name.replace('''bn''' , '''batch_norm''' )
if "head" in name:
snake_case_ = name.replace('''head''' , '''head.head''' )
if "encoder.norm" in name:
snake_case_ = name.replace('''encoder.norm''' , '''layernorm''' )
if "auxlayer" in name:
snake_case_ = name.replace('''auxlayer''' , '''auxiliary_head.head''' )
return name
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case_ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
snake_case_ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
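        # (the fused timm projection has shape (3 * hidden_size, hidden_size):
        # rows [0:h] hold the query, [h:2h] the key and [2h:3h] the value weights)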
snake_case_ = in_proj_weight[: config.hidden_size, :]
snake_case_ = in_proj_bias[: config.hidden_size]
snake_case_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case_ = in_proj_weight[
-config.hidden_size :, :
]
snake_case_ = in_proj_bias[-config.hidden_size :]
def __SCREAMING_SNAKE_CASE ():
snake_case_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case_ = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return im
@torch.no_grad()
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_, snake_case_ = get_dpt_config(SCREAMING_SNAKE_CASE__ )
# load original state_dict from URL
snake_case_ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' )
# remove certain keys
remove_ignore_keys_(SCREAMING_SNAKE_CASE__ )
# rename keys
for key in state_dict.copy().keys():
snake_case_ = state_dict.pop(SCREAMING_SNAKE_CASE__ )
snake_case_ = val
# read in qkv matrices
read_in_q_k_v(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# load HuggingFace model
snake_case_ = DPTForSemanticSegmentation(SCREAMING_SNAKE_CASE__ ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
model.eval()
# Check outputs on an image
snake_case_ = 480 if '''ade''' in checkpoint_url else 384
snake_case_ = DPTImageProcessor(size=SCREAMING_SNAKE_CASE__ )
snake_case_ = prepare_img()
snake_case_ = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' )
# forward pass
snake_case_ = model(**SCREAMING_SNAKE_CASE__ ).logits if '''ade''' in checkpoint_url else model(**SCREAMING_SNAKE_CASE__ ).predicted_depth
# Assert logits
snake_case_ = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] )
if "ade" in checkpoint_url:
snake_case_ = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] )
assert outputs.shape == torch.Size(SCREAMING_SNAKE_CASE__ )
assert (
torch.allclose(outputs[0, 0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , SCREAMING_SNAKE_CASE__ )
)
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
print('''Pushing model to hub...''' )
model.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=SCREAMING_SNAKE_CASE__ , )
image_processor.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=SCREAMING_SNAKE_CASE__ , )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
lowerCAmelCase_ = parser.parse_args()
    convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 8 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ = None ) -> None:
if components is None:
__UpperCamelCase =[]
__UpperCamelCase =list(A_ )
def __len__( self ) -> int:
return len(self.__components )
def __str__( self ) -> str:
return "(" + ",".join(map(A_ , self.__components ) ) + ")"
def __add__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] + other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else:
raise Exception('must have the same size' )
def __sub__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] - other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self , A_ ) -> Vector:
...
@overload
def __mul__( self , A_ ) -> float:
...
def __mul__( self , A_ ) -> float | Vector:
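        # A float/int operand scales the vector component-wise; a Vector
        # operand of equal length yields the dot product (a float).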
if isinstance(A_ , (float, int) ):
__UpperCamelCase =[c * other for c in self.__components]
return Vector(A_ )
elif isinstance(A_ , A_ ) and len(self ) == len(A_ ):
__UpperCamelCase =len(self )
__UpperCamelCase =[self.__components[i] * other.component(A_ ) for i in range(A_ )]
return sum(A_ )
else: # error case
raise Exception('invalid operand!' )
def _a ( self ) -> Vector:
return Vector(self.__components )
def _a ( self , A_ ) -> float:
if isinstance(A_ , A_ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def _a ( self , A_ , A_ ) -> None:
assert -len(self.__components ) <= pos < len(self.__components )
__UpperCamelCase =value
def _a ( self ) -> float:
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
__UpperCamelCase =[c**2 for c in self.__components]
return math.sqrt(sum(A_ ) )
def _a ( self , A_ , A_ = False ) -> float:
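        # Angle between two vectors, from u . v = |u| * |v| * cos(theta);
        # deg=True converts the result from radians to degrees.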
__UpperCamelCase =self * other
__UpperCamelCase =self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return Vector([0] * dimension )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ))
__UpperCamelCase =[0] * dimension
__UpperCamelCase =1
return Vector(SCREAMING_SNAKE_CASE__ )
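# The next helper is the classic BLAS "axpy" operation: it returns
# x * scalar + y, i.e. a scaled vector plus another vector.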
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Vector , SCREAMING_SNAKE_CASE__ : Vector ):
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and (isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ))
)
return x * scalar + y
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
random.seed(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )]
return Vector(SCREAMING_SNAKE_CASE__ )
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_ , A_ ) -> None:
__UpperCamelCase =matrix
__UpperCamelCase =w
__UpperCamelCase =h
def __str__( self ) -> str:
__UpperCamelCase =''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] + other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
            raise Exception('matrices must have the same dimension!' )
def __sub__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] - other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self , A_ ) -> Matrix:
...
@overload
def __mul__( self , A_ ) -> Vector:
...
def __mul__( self , A_ ) -> Vector | Matrix:
if isinstance(A_ , A_ ): # matrix-vector
if len(A_ ) == self.__width:
__UpperCamelCase =zero_vector(self.__height )
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] * other.component(A_ )
for j in range(self.__width )
]
ans.change_component(A_ , sum(A_ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(A_ , (int, float) ): # matrix-scalar
__UpperCamelCase =[
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(A_ , self.__width , self.__height )
return None
def _a ( self ) -> int:
return self.__height
def _a ( self ) -> int:
return self.__width
def _a ( self , A_ , A_ ) -> float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
            raise Exception('component: indices out of bounds' )
def _a ( self , A_ , A_ , A_ ) -> None:
if 0 <= x < self.__height and 0 <= y < self.__width:
__UpperCamelCase =value
else:
raise Exception('change_component: indices out of bounds' )
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
__UpperCamelCase =self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(A_ ) ):
__UpperCamelCase =minor[i][:y] + minor[i][y + 1 :]
return Matrix(A_ , self.__width - 1 , self.__height - 1 ).determinant()
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(A_ , A_ )
else:
raise Exception('Indices out of bounds' )
def _a ( self ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
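            # General case: Laplace (cofactor) expansion along the first row.
            # This recursion is O(n!), so it is only practical for small matrices.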
__UpperCamelCase =[
self.__matrix[0][y] * self.cofactor(0 , A_ ) for y in range(self.__width )
]
return sum(A_ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =[[0] * n for _ in range(SCREAMING_SNAKE_CASE__ )]
return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
random.seed(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[
[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ )
]
return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
| 62 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : Union[str, Any] ={
'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int =[
'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimesformerModel',
'TimesformerForVideoClassification',
'TimesformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : str =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
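# Registering a _LazyModule in place of this module defers the heavy torch
# imports until an attribute such as TimesformerModel is first accessed.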
| 9 |
_A = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
_A = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : dict[int, list[int]] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[bool] ):
__UpperCamelCase =True
__UpperCamelCase =[]
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
order.append(SCREAMING_SNAKE_CASE__ )
return order
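# The vertices are appended in increasing order of DFS finish time; the
# driver below consumes this order in reverse.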
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : dict[int, list[int]] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[bool] ):
__UpperCamelCase =True
__UpperCamelCase =[vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return component
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : dict[int, list[int]] ):
__UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) * [False]
__UpperCamelCase ={vert: [] for vert in range(len(SCREAMING_SNAKE_CASE__ ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[]
for i, was_visited in enumerate(SCREAMING_SNAKE_CASE__ ):
if not was_visited:
order += topology_sort(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[]
__UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) * [False]
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
__UpperCamelCase =order[len(SCREAMING_SNAKE_CASE__ ) - i - 1]
if not visited[vert]:
__UpperCamelCase =find_components(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
components_list.append(SCREAMING_SNAKE_CASE__ )
return components_list
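# This is Kosaraju's algorithm: the DFS finish-time order computed above is
# replayed in reverse on the transposed graph. For the second sample graph
# defined at the top, the components are {0, 1, 2} and {3, 4, 5}.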
| 62 | 0 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
__A = Mapping[str, np.ndarray]
__A = Mapping[str, Any] # Is a nested dict.
__A = 0.0_1
@dataclasses.dataclass(frozen=__SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
lowercase_ = 42 # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
lowercase_ = 42 # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
lowercase_ = 42 # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
lowercase_ = 42 # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
lowercase_ = 42 # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
lowercase_ = None
# Optional remark about the protein. Included as a comment in output PDB
# files
lowercase_ = None
# Templates used to generate this protein (prediction-only)
lowercase_ = None
# Chain corresponding to each parent
lowercase_ = None
def lowerCAmelCase_ ( __a ) -> Protein:
"""simple docstring"""
lowerCamelCase__: Union[str, Any] =R"(\[[A-Z]+\]\n)"
lowerCamelCase__: List[str] =[tag.strip() for tag in re.split(__a , __a ) if len(__a ) > 0]
lowerCamelCase__: Iterator[Tuple[str, List[str]]] =zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
lowerCamelCase__: List[str] =["N", "CA", "C"]
lowerCamelCase__: int =None
lowerCamelCase__: str =None
lowerCamelCase__: Dict =None
for g in groups:
if "[PRIMARY]" == g[0]:
lowerCamelCase__: Optional[Any] =g[1][0].strip()
for i in range(len(__a ) ):
if seq[i] not in residue_constants.restypes:
lowerCamelCase__: Optional[int] ="X" # FIXME: strings are immutable
lowerCamelCase__: Optional[int] =np.array(
[residue_constants.restype_order.get(__a , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
lowerCamelCase__: List[List[float]] =[]
for axis in range(3 ):
tertiary.append(list(map(__a , g[1][axis].split() ) ) )
lowerCamelCase__: List[str] =np.array(__a )
lowerCamelCase__: List[str] =np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(__a ):
lowerCamelCase__: str =np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
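            # ProteinNet stores coordinates in picometres; the factor above
            # (PICO_TO_ANGSTROM = 0.01) converts them to angstroms.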
elif "[MASK]" == g[0]:
lowerCamelCase__: int =np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
lowerCamelCase__: Dict =np.zeros(
(
len(__a ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(__a ):
lowerCamelCase__: int =1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=__a , atom_mask=__a , aatype=__a , residue_index=np.arange(len(__a ) ) , b_factors=__a , )
def lowerCAmelCase_ ( __a , __a = 0 ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: List[str] =[]
lowerCamelCase__: Optional[Any] =prot.remark
if remark is not None:
pdb_headers.append(F"""REMARK {remark}""" )
lowerCamelCase__: List[str] =prot.parents
lowerCamelCase__: Optional[int] =prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
lowerCamelCase__: int =[p for i, p in zip(__a , __a ) if i == chain_id]
if parents is None or len(__a ) == 0:
lowerCamelCase__: Optional[int] =["N/A"]
pdb_headers.append(F"""PARENT {" ".join(__a )}""" )
return pdb_headers
def lowerCAmelCase_ ( __a , __a ) -> str:
"""simple docstring"""
lowerCamelCase__: List[str] =[]
lowerCamelCase__: Any =pdb_str.split("\n" )
lowerCamelCase__: Optional[Any] =prot.remark
if remark is not None:
out_pdb_lines.append(F"""REMARK {remark}""" )
lowerCamelCase__: List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
lowerCamelCase__: int =[]
if prot.parents_chain_index is not None:
lowerCamelCase__: Dict[str, List[str]] ={}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(__a ) , [] )
parent_dict[str(__a )].append(__a )
lowerCamelCase__: List[Any] =max([int(__a ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
lowerCamelCase__: Optional[Any] =parent_dict.get(str(__a ) , ["N/A"] )
parents_per_chain.append(__a )
else:
parents_per_chain.append(list(prot.parents ) )
else:
lowerCamelCase__: Optional[Any] =[["N/A"]]
def make_parent_line(__a ) -> str:
return F"""PARENT {" ".join(__a )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
lowerCamelCase__: Optional[int] =0
for i, l in enumerate(__a ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(__a )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(__a ):
lowerCamelCase__: Union[str, Any] =parents_per_chain[chain_counter]
else:
lowerCamelCase__: int =["N/A"]
out_pdb_lines.append(make_parent_line(__a ) )
return "\n".join(__a )
def lowerCAmelCase_ ( __a ) -> str:
"""simple docstring"""
lowerCamelCase__: str =residue_constants.restypes + ["X"]
def res_atoa(__a ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , "UNK" )
lowerCamelCase__: List[str] =residue_constants.atom_types
lowerCamelCase__: List[str] =[]
lowerCamelCase__: Any =prot.atom_mask
lowerCamelCase__: str =prot.aatype
lowerCamelCase__: Optional[int] =prot.atom_positions
lowerCamelCase__: List[str] =prot.residue_index.astype(np.intaa )
lowerCamelCase__: List[str] =prot.b_factors
lowerCamelCase__: str =prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
lowerCamelCase__: str =get_pdb_headers(__a )
if len(__a ) > 0:
pdb_lines.extend(__a )
lowerCamelCase__: Dict =aatype.shape[0]
lowerCamelCase__: Dict =1
lowerCamelCase__: List[Any] =0
lowerCamelCase__: Optional[Any] =string.ascii_uppercase
lowerCamelCase__: List[str] =None
# Add all atom sites.
for i in range(__a ):
lowerCamelCase__: Any =res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(__a , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
lowerCamelCase__: Union[str, Any] ="ATOM"
lowerCamelCase__: Union[str, Any] =atom_name if len(__a ) == 4 else F""" {atom_name}"""
lowerCamelCase__: int =""
lowerCamelCase__: List[str] =""
lowerCamelCase__: int =1.0_0
lowerCamelCase__: Optional[int] =atom_name[0] # Protein supports only C, N, O, S, this works.
lowerCamelCase__: Dict =""
lowerCamelCase__: Union[str, Any] ="A"
if chain_index is not None:
lowerCamelCase__: Any =chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
lowerCamelCase__: str =(
F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
F"""{res_name_a:>3} {chain_tag:>1}"""
F"""{residue_index[i]:>4}{insertion_code:>1} """
F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
F"""{occupancy:>6.2f}{b_factor:>6.2f} """
F"""{element:>2}{charge:>2}"""
)
pdb_lines.append(__a )
atom_index += 1
lowerCamelCase__: Optional[Any] =i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
lowerCamelCase__: List[str] =True
lowerCamelCase__: Optional[int] =chain_index[i + 1]
if should_terminate:
# Close the chain.
lowerCamelCase__: str ="TER"
lowerCamelCase__: List[Any] =(
F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(__a )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(__a , __a ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(__a )
def lowerCAmelCase_ ( __a ) -> np.ndarray:
"""simple docstring"""
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def lowerCAmelCase_ ( __a , __a , __a = None , __a = None , __a = None , __a = None , __a = None , ) -> Protein:
"""simple docstring"""
return Protein(
aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=__a , remark=__a , parents=__a , parents_chain_index=__a , )
| 10 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
_A = '▁'
_A = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
_A = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
_A = {'vinai/bartpho-syllable': 1024}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Any = VOCAB_FILES_NAMES
UpperCAmelCase__ : int = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : str = ["input_ids", "attention_mask"]
def __init__( self , A_ , A_ , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_ = None , **A_ , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
__UpperCamelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
__UpperCamelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
__UpperCamelCase =vocab_file
__UpperCamelCase =monolingual_vocab_file
__UpperCamelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A_ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
__UpperCamelCase ={}
__UpperCamelCase =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(A_ ) not in self.fairseq_tokens_to_ids:
__UpperCamelCase =cnt
cnt += 1
with open(A_ , 'r' , encoding='utf-8' ) as f:
for line in f.readlines():
__UpperCamelCase =line.strip().split()[0]
__UpperCamelCase =len(self.fairseq_tokens_to_ids )
if str(A_ ) not in self.fairseq_tokens_to_ids:
__UpperCamelCase =len(self.fairseq_tokens_to_ids )
__UpperCamelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Any:
__UpperCamelCase =self.__dict__.copy()
__UpperCamelCase =None
__UpperCamelCase =self.sp_model.serialized_model_proto()
return state
def __setstate__( self , A_ ) -> List[str]:
__UpperCamelCase =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase ={}
__UpperCamelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _a ( self , A_ , A_ = None ) -> List[int]:
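        # BARTPho follows the BART format: <s> A </s> for a single sequence
        # and <s> A </s></s> B </s> for a pair of sequences.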
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCamelCase =[self.cls_token_id]
__UpperCamelCase =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _a ( self , A_ , A_ = None , A_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def _a ( self , A_ , A_ = None ) -> List[int]:
__UpperCamelCase =[self.sep_token_id]
__UpperCamelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _a ( self ) -> Any:
return len(self.fairseq_ids_to_tokens )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase ={self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _a ( self , A_ ) -> List[str]:
return self.sp_model.encode(A_ , out_type=A_ )
def _a ( self , A_ ) -> str:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def _a ( self , A_ ) -> int:
return self.fairseq_ids_to_tokens[index]
def _a ( self , A_ ) -> List[Any]:
__UpperCamelCase =''.join(A_ ).replace(A_ , ' ' ).strip()
return out_string
def _a ( self , A_ , A_ = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCamelCase =os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase =os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ , 'wb' ) as fi:
__UpperCamelCase =self.sp_model.serialized_model_proto()
fi.write(A_ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
A_ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , A_ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(A_ , 'w' , encoding='utf-8' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'{str(A_ )} \n' )
return out_vocab_file, out_monolingual_vocab_file
| 62 | 0 |
from collections.abc import Sequence
from queue import Queue
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None) -> Union[str, Any]:
_A : str = start
_A : Optional[int] = end
_A : List[str] = val
_A : Tuple = (start + end) // 2
_A : Any = left
_A : List[Any] = right
def __repr__( self) -> Any:
return F"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"
class lowerCAmelCase__ :
'''simple docstring'''
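    # Building the tree is O(n); update walks a single root-to-leaf path and
    # query_range visits O(log n) nodes, so both run in O(log n) for n elements.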
def __init__( self , __lowerCamelCase , __lowerCamelCase) -> List[str]:
_A : str = collection
_A : Optional[Any] = function
if self.collection:
_A : Optional[int] = self._build_tree(0 , len(__lowerCamelCase) - 1)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase) -> Tuple:
self._update_tree(self.root , __lowerCamelCase , __lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase) -> List[str]:
return self._query_range(self.root , __lowerCamelCase , __lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase) -> List[str]:
if start == end:
return SegmentTreeNode(__lowerCamelCase , __lowerCamelCase , self.collection[start])
_A : List[Any] = (start + end) // 2
_A : int = self._build_tree(__lowerCamelCase , __lowerCamelCase)
_A : Optional[Any] = self._build_tree(mid + 1 , __lowerCamelCase)
return SegmentTreeNode(__lowerCamelCase , __lowerCamelCase , self.fn(left.val , right.val) , __lowerCamelCase , __lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> int:
if node.start == i and node.end == i:
_A : List[str] = val
return
if i <= node.mid:
self._update_tree(node.left , __lowerCamelCase , __lowerCamelCase)
else:
self._update_tree(node.right , __lowerCamelCase , __lowerCamelCase)
_A : str = self.fn(node.left.val , node.right.val)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> str:
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , __lowerCamelCase , __lowerCamelCase)
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , __lowerCamelCase , node.mid) , self._query_range(node.right , node.mid + 1 , __lowerCamelCase) , )
else:
# range in right child tree
return self._query_range(node.right , __lowerCamelCase , __lowerCamelCase)
def _lowerCamelCase ( self) -> Dict:
if self.root is not None:
_A : Optional[int] = Queue()
queue.put(self.root)
while not queue.empty():
_A : Tuple = queue.get()
yield node
if node.left is not None:
queue.put(node.left)
if node.right is not None:
queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('*' * 50)
lowerCAmelCase__ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 11 |
from numpy import exp, pi, sqrt
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : float = 0.0 , SCREAMING_SNAKE_CASE__ : float = 1.0 ):
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
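# This evaluates the normal probability density
# f(x) = 1 / sqrt(2 * pi * sigma**2) * exp(-(x - mu)**2 / (2 * sigma**2));
# for the standard normal (mu=0, sigma=1), f(0) is approximately 0.3989.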
if __name__ == "__main__":
import doctest
doctest.testmod()
| 62 | 0 |
from math import factorial
class lowerCamelCase__:
def __init__( self: List[str] , UpperCamelCase_: str , UpperCamelCase_: int ):
__lowerCamelCase = real
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = [1] * rank
else:
__lowerCamelCase = rank
def __repr__( self: List[Any] ):
return (
F'{self.real}+'
F'{"+".join(str(UpperCamelCase_ )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}'
)
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , UpperCamelCase_ )
def __add__( self: int , UpperCamelCase_: Tuple ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
return Dual(self.real + other , self.duals )
__lowerCamelCase = self.duals.copy()
__lowerCamelCase = other.duals.copy()
if len(UpperCamelCase_ ) > len(UpperCamelCase_ ):
o_dual.extend([1] * (len(UpperCamelCase_ ) - len(UpperCamelCase_ )) )
elif len(UpperCamelCase_ ) < len(UpperCamelCase_ ):
s_dual.extend([1] * (len(UpperCamelCase_ ) - len(UpperCamelCase_ )) )
__lowerCamelCase = []
for i in range(len(UpperCamelCase_ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , UpperCamelCase_ )
UpperCAmelCase__ : Dict = __add__
def __sub__( self: List[str] , UpperCamelCase_: Dict ):
return self + other * -1
def __mul__( self: Any , UpperCamelCase_: Optional[Any] ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , UpperCamelCase_ )
__lowerCamelCase = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , UpperCamelCase_ )
UpperCAmelCase__ : Optional[int] = __mul__
def __truediv__( self: Dict , UpperCamelCase_: Tuple ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , UpperCamelCase_ )
raise ValueError
def __floordiv__( self: int , UpperCamelCase_: int ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , UpperCamelCase_ )
raise ValueError
    def __pow__( self: Tuple , n: int ):
        if n < 0 or isinstance(n , float ):
raise ValueError("""power must be a positive integer""" )
if n == 0:
return 1
if n == 1:
return self
__lowerCamelCase = self
for _ in range(n - 1 ):
x *= self
return x
def lowerCamelCase__ ( A__ : List[str] , A__ : str , A__ : str ):
'''simple docstring'''
if not callable(A__ ):
raise ValueError("""differentiate() requires a function as input for func""" )
if not isinstance(A__ , (float, int) ):
raise ValueError("""differentiate() requires a float as input for position""" )
if not isinstance(A__ , A__ ):
raise ValueError("""differentiate() requires an int as input for order""" )
__lowerCamelCase = Dual(A__ , 1 )
__lowerCamelCase = func(A__ )
if order == 0:
return result.real
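    # Evaluating func at the dual number position + E yields the truncated
    # Taylor series of func around position, so the coefficient of E**order
    # is f^(order)(position) / order!; multiplying by factorial(order)
    # recovers the derivative.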
return result.duals[order - 1] * factorial(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
return y**2 * y**4
print(differentiate(f, 9, 2))
| 12 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_A = logging.get_logger(__name__)
if is_vision_available():
import PIL
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = ["pixel_values"]
def __init__( self , A_ = True , A_ = None , A_ = PILImageResampling.BICUBIC , A_ = True , A_ = None , A_ = True , A_ = 1 / 255 , A_ = True , A_ = None , A_ = None , A_ = True , **A_ , ) -> None:
super().__init__(**A_ )
__UpperCamelCase =size if size is not None else {'shortest_edge': 224}
__UpperCamelCase =get_size_dict(A_ , default_to_square=A_ )
__UpperCamelCase =crop_size if crop_size is not None else {'height': 224, 'width': 224}
__UpperCamelCase =get_size_dict(A_ , default_to_square=A_ , param_name='crop_size' )
__UpperCamelCase =do_resize
__UpperCamelCase =size
__UpperCamelCase =resample
__UpperCamelCase =do_center_crop
__UpperCamelCase =crop_size
__UpperCamelCase =do_rescale
__UpperCamelCase =rescale_factor
__UpperCamelCase =do_normalize
__UpperCamelCase =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__UpperCamelCase =image_std if image_std is not None else OPENAI_CLIP_STD
__UpperCamelCase =do_convert_rgb
def _a ( self , A_ , A_ , A_ = PILImageResampling.BICUBIC , A_ = None , **A_ , ) -> np.ndarray:
__UpperCamelCase =get_size_dict(A_ , default_to_square=A_ )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
__UpperCamelCase =get_resize_output_image_size(A_ , size=size['shortest_edge'] , default_to_square=A_ )
return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ )
def _a ( self , A_ , A_ , A_ = None , **A_ , ) -> np.ndarray:
__UpperCamelCase =get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(A_ , size=(size['height'], size['width']) , data_format=A_ , **A_ )
def _a ( self , A_ , A_ , A_ = None , **A_ , ) -> Union[str, Any]:
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def _a ( self , A_ , A_ , A_ , A_ = None , **A_ , ) -> np.ndarray:
return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )
def _a ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ) -> PIL.Image.Image:
__UpperCamelCase =do_resize if do_resize is not None else self.do_resize
__UpperCamelCase =size if size is not None else self.size
__UpperCamelCase =get_size_dict(A_ , param_name='size' , default_to_square=A_ )
__UpperCamelCase =resample if resample is not None else self.resample
__UpperCamelCase =do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCamelCase =crop_size if crop_size is not None else self.crop_size
__UpperCamelCase =get_size_dict(A_ , param_name='crop_size' , default_to_square=A_ )
__UpperCamelCase =do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase =rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase =do_normalize if do_normalize is not None else self.do_normalize
__UpperCamelCase =image_mean if image_mean is not None else self.image_mean
__UpperCamelCase =image_std if image_std is not None else self.image_std
__UpperCamelCase =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__UpperCamelCase =make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__UpperCamelCase =[convert_to_rgb(A_ ) for image in images]
# All transformations expect numpy arrays.
__UpperCamelCase =[to_numpy_array(A_ ) for image in images]
if do_resize:
__UpperCamelCase =[self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
if do_center_crop:
__UpperCamelCase =[self.center_crop(image=A_ , size=A_ ) for image in images]
if do_rescale:
__UpperCamelCase =[self.rescale(image=A_ , scale=A_ ) for image in images]
if do_normalize:
__UpperCamelCase =[self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images]
__UpperCamelCase =[to_channel_dimension_format(A_ , A_ ) for image in images]
__UpperCamelCase ={'pixel_values': images}
return BatchFeature(data=A_ , tensor_type=A_ )
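# Usage sketch (descriptive only; `_a` corresponds to the public `preprocess`
# method upstream): given a list of images it applies, in order, RGB
# conversion, shortest-edge resize to 224, a 224x224 center crop, 1/255
# rescaling and CLIP mean/std normalization, and returns a BatchFeature
# whose 'pixel_values' entry has shape (num_images, 3, 224, 224).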
| 62 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
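# Full determinism pins the CUDA/cuDNN kernels to deterministic variants so
# that the expected pixel slices asserted below are reproducible across runs.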
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = KandinskyVaaControlnetPipeline
_UpperCAmelCase : Optional[int] = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
_UpperCAmelCase : Optional[Any] = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
_UpperCAmelCase : List[str] = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_UpperCAmelCase : Optional[int] = False
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple):
return self.time_input_dim
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
return self.time_input_dim * 4
@property
def _SCREAMING_SNAKE_CASE ( self : Any):
return 100
@property
def _SCREAMING_SNAKE_CASE ( self : str):
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Union[str, Any] = {
"in_channels": 8,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
SCREAMING_SNAKE_CASE_: int = UNetaDConditionModel(**lowerCAmelCase__)
return model
@property
def _SCREAMING_SNAKE_CASE ( self : str):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _SCREAMING_SNAKE_CASE ( self : Dict):
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: str = VQModel(**self.dummy_movq_kwargs)
return model
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.dummy_unet
SCREAMING_SNAKE_CASE_: Optional[Any] = self.dummy_movq
SCREAMING_SNAKE_CASE_: Tuple = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , steps_offset=1 , prediction_type="epsilon" , thresholding=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_: Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str]=0):
SCREAMING_SNAKE_CASE_: List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
lowerCAmelCase__)
# create hint
SCREAMING_SNAKE_CASE_: int = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
if str(lowerCAmelCase__).startswith("mps"):
SCREAMING_SNAKE_CASE_: List[str] = torch.manual_seed(lowerCAmelCase__)
else:
SCREAMING_SNAKE_CASE_: List[str] = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = {
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Any = "cpu"
SCREAMING_SNAKE_CASE_: Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE_: Any = self.pipeline_class(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = pipe(**self.get_dummy_inputs(lowerCAmelCase__))
SCREAMING_SNAKE_CASE_: List[str] = output.images
SCREAMING_SNAKE_CASE_: Tuple = pipe(
**self.get_dummy_inputs(lowerCAmelCase__) , return_dict=lowerCAmelCase__ , )[0]
SCREAMING_SNAKE_CASE_: Optional[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_: Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_: Any = np.array(
[0.695_9826, 0.86_8279, 0.755_8092, 0.6876_9467, 0.8580_5804, 0.6597_7496, 0.4488_5302, 0.595_9111, 0.425_1595])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy")
SCREAMING_SNAKE_CASE_: Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png")
SCREAMING_SNAKE_CASE_: Any = torch.from_numpy(np.array(lowerCAmelCase__)).float() / 255.0
SCREAMING_SNAKE_CASE_: Any = hint.permute(2 , 0 , 1).unsqueeze(0)
SCREAMING_SNAKE_CASE_: Tuple = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa)
pipe_prior.to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = KandinskyVaaControlnetPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa)
SCREAMING_SNAKE_CASE_: Dict = pipeline.to(lowerCAmelCase__)
pipeline.set_progress_bar_config(disable=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = "A robot, 4k photo"
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.Generator(device="cuda").manual_seed(0)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = pipe_prior(
lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
SCREAMING_SNAKE_CASE_: int = torch.Generator(device="cuda").manual_seed(0)
SCREAMING_SNAKE_CASE_: Any = pipeline(
image_embeds=lowerCAmelCase__ , negative_image_embeds=lowerCAmelCase__ , hint=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=100 , output_type="np" , )
SCREAMING_SNAKE_CASE_: int = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__)
| 13 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = "yolos"
def __init__( self , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.0 , A_=0.0 , A_=0.02 , A_=1E-12 , A_=[512, 864] , A_=16 , A_=3 , A_=True , A_=100 , A_=True , A_=False , A_=1 , A_=5 , A_=2 , A_=5 , A_=2 , A_=0.1 , **A_ , ) -> Any:
super().__init__(**A_ )
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =initializer_range
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =image_size
__UpperCamelCase =patch_size
__UpperCamelCase =num_channels
__UpperCamelCase =qkv_bias
__UpperCamelCase =num_detection_tokens
__UpperCamelCase =use_mid_position_embeddings
__UpperCamelCase =auxiliary_loss
# Hungarian matcher
__UpperCamelCase =class_cost
__UpperCamelCase =bbox_cost
__UpperCamelCase =giou_cost
# Loss coefficients
__UpperCamelCase =bbox_loss_coefficient
__UpperCamelCase =giou_loss_coefficient
__UpperCamelCase =eos_coefficient
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : str = version.parse("1.11" )
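    # ONNX export settings: the three properties below expose the dynamic axes for
    # pixel_values, the validation tolerance, and the default opset version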
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _a ( self ) -> float:
return 1E-4
@property
def _a ( self ) -> int:
return 12
| 62 | 0 |
from __future__ import annotations
import pandas as pd
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> list[int]:
"""simple docstring"""
A__ = [0] * no_of_processes
A__ = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(lowercase_ ):
A__ = burst_time[i]
A__ = 0
A__ = 0
A__ = 999_999_999
A__ = 0
A__ = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(lowercase_ ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
A__ = remaining_time[j]
A__ = j
A__ = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
A__ = remaining_time[short]
if minm == 0:
A__ = 999_999_999
if remaining_time[short] == 0:
complete += 1
A__ = False
# Find finish time of current process
A__ = increment_time + 1
# Calculate waiting time
A__ = finish_time - arrival_time[short]
A__ = finar - burst_time[short]
if waiting_time[short] < 0:
A__ = 0
# Increment time
increment_time += 1
return waiting_time
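# Illustrative trace (not part of the original script): with arrival times
# [0, 1, 2] and burst times [3, 1, 2], process 1 preempts process 0 at t=1, so
# calculate_waitingtime([0, 1, 2], [3, 1, 2], 3) returns [1, 0, 2]
# (waiting time = finish time - arrival time - burst time).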
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> list[int]:
"""simple docstring"""
A__ = [0] * no_of_processes
for i in range(lowercase_ ):
A__ = burst_time[i] + waiting_time[i]
return turn_around_time
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> None:
"""simple docstring"""
A__ = 0
A__ = 0
for i in range(lowercase_ ):
A__ = total_waiting_time + waiting_time[i]
A__ = total_turn_around_time + turn_around_time[i]
print(f"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" )
print('''Average turn around time =''' , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print("""Enter how many process you want to analyze""")
_lowerCamelCase : str = int(input())
_lowerCamelCase : Optional[int] = [0] * no_of_processes
_lowerCamelCase : str = [0] * no_of_processes
_lowerCamelCase : Tuple = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("""Enter the arrival time and burst time for process:--""" + str(i + 1))
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = map(int, input().split())
_lowerCamelCase : List[str] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
_lowerCamelCase : Any = burst_time
_lowerCamelCase : Optional[Any] = no_of_processes
_lowerCamelCase : Any = waiting_time
_lowerCamelCase : Optional[int] = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
_lowerCamelCase : Optional[Any] = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"""Process""",
"""BurstTime""",
"""ArrivalTime""",
"""WaitingTime""",
"""TurnAroundTime""",
],
)
# Printing the dataFrame
pd.set_option("""display.max_rows""", fcfs.shape[0] + 1)
print(fcfs)
| 14 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_A = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
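# lazy import structure: submodule -> exported names; the blocks below add
# torch- and vision-dependent entries only when those backends are importable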
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
SCREAMING_SNAKE_CASE :Dict = pytest.mark.integration
@require_faiss
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase_ ( self : Tuple ):
__A = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(A ) for x in np.arange(30 ).tolist()]} )
return dset
def UpperCamelCase_ ( self : Optional[Any] ):
import faiss
__A = self._create_dummy_dataset()
__A = dset.map(
lambda A ,A : {"vecs": i * np.ones(5 ,dtype=np.floataa )} ,with_indices=A ,keep_in_memory=A )
__A = dset.add_faiss_index("vecs" ,batch_size=1_00 ,metric_type=faiss.METRIC_INNER_PRODUCT )
__A , __A = dset.get_nearest_examples("vecs" ,np.ones(5 ,dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] ,"my_name-train_29" )
dset.drop_index("vecs" )
def UpperCamelCase_ ( self : Tuple ):
import faiss
__A = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 ,1 ) ,index_name="vecs" ,batch_size=1_00 ,metric_type=faiss.METRIC_INNER_PRODUCT ,)
__A , __A = dset.get_nearest_examples("vecs" ,np.ones(5 ,dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] ,"my_name-train_29" )
def UpperCamelCase_ ( self : List[str] ):
import faiss
__A = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 ,1 ) ,index_name="vecs" ,metric_type=faiss.METRIC_INNER_PRODUCT ,)
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=A ) as tmp_file:
dset.save_faiss_index("vecs" ,tmp_file.name )
dset.load_faiss_index("vecs2" ,tmp_file.name )
os.unlink(tmp_file.name )
__A , __A = dset.get_nearest_examples("vecs2" ,np.ones(5 ,dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] ,"my_name-train_29" )
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 ,1 ) ,index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(A ,partial(dset.get_nearest_examples ,"vecs2" ,np.ones(5 ,dtype=np.floataa ) ) )
def UpperCamelCase_ ( self : Dict ):
from elasticsearch import Elasticsearch
__A = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
__A = {"acknowledged": True}
            mocked_bulk.return_value = [(True, None)] * 30
__A = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
__A = Elasticsearch()
dset.add_elasticsearch_index("filename" ,es_client=A )
__A , __A = dset.get_nearest_examples("filename" ,"my_name-train_29" )
self.assertEqual(examples["filename"][0] ,"my_name-train_29" )
@require_faiss
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase_ ( self : Optional[int] ):
import faiss
__A = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal ,5 )
index.add_vectors(np.zeros((5, 5) ,dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal ,10 )
# single query
__A = np.zeros(5 ,dtype=np.floataa )
__A = 1
__A , __A = index.search(A )
self.assertRaises(A ,index.search ,query.reshape(-1 ,1 ) )
self.assertGreater(scores[0] ,0 )
self.assertEqual(indices[0] ,1 )
# batched queries
__A = np.eye(5 ,dtype=np.floataa )[::-1]
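        # reversed basis-vector queries should match the first five stored vectors
        # in reverse order, hence the [4, 3, 2, 1, 0] expectation below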
__A , __A = index.search_batch(A )
self.assertRaises(A ,index.search_batch ,queries[0] )
__A = [scores[0] for scores in total_scores]
__A = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A ) ,0 )
self.assertListEqual([4, 3, 2, 1, 0] ,A )
def UpperCamelCase_ ( self : Tuple ):
import faiss
__A = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index ,faiss.IndexFlat )
__A = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index ,faiss.IndexLSH )
with self.assertRaises(A ):
__A = FaissIndex(string_factory="Flat" ,custom_index=faiss.IndexFlat(5 ) )
def UpperCamelCase_ ( self : Any ):
import faiss
__A = faiss.IndexFlat(5 )
__A = FaissIndex(custom_index=A )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index ,faiss.IndexFlat )
def UpperCamelCase_ ( self : Union[str, Any] ):
import faiss
__A = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=A ) as tmp_file:
index.save(tmp_file.name )
__A = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__A = np.zeros(5 ,dtype=np.floataa )
__A = 1
__A , __A = index.search(A )
self.assertGreater(scores[0] ,0 )
self.assertEqual(indices[0] ,1 )
@require_faiss
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
import faiss
__A = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
__A = "index.faiss"
__A = F'''mock://{index_name}'''
index.save(a_ , storage_options=mockfs.storage_options )
__A = FaissIndex.load(a_ , storage_options=mockfs.storage_options )
__A = np.zeros(5 , dtype=np.floataa )
__A = 1
__A , __A = index.search(a_ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase_ ( self : Dict ):
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
__A = Elasticsearch()
__A = {"acknowledged": True}
__A = ElasticSearchIndex(es_client=A )
            mocked_bulk.return_value = [(True, None)] * 3
index.add_documents(["foo", "bar", "foobar"] )
# single query
__A = "foo"
__A = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__A , __A = index.search(A )
self.assertEqual(scores[0] ,1 )
self.assertEqual(indices[0] ,0 )
# single query with timeout
__A = "foo"
__A = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__A , __A = index.search(A ,request_timeout=30 )
self.assertEqual(scores[0] ,1 )
self.assertEqual(indices[0] ,0 )
# batched queries
__A = ["foo", "bar", "foobar"]
__A = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__A , __A = index.search_batch(A )
__A = [scores[0] for scores in total_scores]
__A = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A ) ,0 )
self.assertListEqual([1, 1, 1] ,A )
# batched queries with timeout
__A = ["foo", "bar", "foobar"]
__A = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__A , __A = index.search_batch(A ,request_timeout=30 )
__A = [scores[0] for scores in total_scores]
__A = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A ) ,0 )
self.assertListEqual([1, 1, 1] ,A )
| 15 |
from __future__ import annotations
import math
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ ) -> None:
__UpperCamelCase =size
# approximate the overall size of segment tree with given value
__UpperCamelCase =[0 for i in range(0 , 4 * size )]
# create array to store lazy update
__UpperCamelCase =[0 for i in range(0 , 4 * size )]
__UpperCamelCase =[0 for i in range(0 , 4 * size )] # flag for lazy update
def _a ( self , A_ ) -> int:
return idx * 2
def _a ( self , A_ ) -> int:
return idx * 2 + 1
def _a ( self , A_ , A_ , A_ , A_ ) -> None:
if left_element == right_element:
__UpperCamelCase =a[left_element - 1]
else:
__UpperCamelCase =(left_element + right_element) // 2
self.build(self.left(A_ ) , A_ , A_ , A_ )
self.build(self.right(A_ ) , mid + 1 , A_ , A_ )
__UpperCamelCase =max(
self.segment_tree[self.left(A_ )] , self.segment_tree[self.right(A_ )] )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ ) -> bool:
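        # lazy propagation: apply any pending assignment stored at this node and
        # push it down to both children before recursing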
if self.flag[idx] is True:
__UpperCamelCase =self.lazy[idx]
__UpperCamelCase =False
if left_element != right_element:
__UpperCamelCase =self.lazy[idx]
__UpperCamelCase =self.lazy[idx]
__UpperCamelCase =True
__UpperCamelCase =True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
__UpperCamelCase =val
if left_element != right_element:
__UpperCamelCase =val
__UpperCamelCase =val
__UpperCamelCase =True
__UpperCamelCase =True
return True
__UpperCamelCase =(left_element + right_element) // 2
self.update(self.left(A_ ) , A_ , A_ , A_ , A_ , A_ )
self.update(self.right(A_ ) , mid + 1 , A_ , A_ , A_ , A_ )
__UpperCamelCase =max(
self.segment_tree[self.left(A_ )] , self.segment_tree[self.right(A_ )] )
return True
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> int | float:
if self.flag[idx] is True:
__UpperCamelCase =self.lazy[idx]
__UpperCamelCase =False
if left_element != right_element:
__UpperCamelCase =self.lazy[idx]
__UpperCamelCase =self.lazy[idx]
__UpperCamelCase =True
__UpperCamelCase =True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
__UpperCamelCase =(left_element + right_element) // 2
__UpperCamelCase =self.query(self.left(A_ ) , A_ , A_ , A_ , A_ )
__UpperCamelCase =self.query(self.right(A_ ) , mid + 1 , A_ , A_ , A_ )
return max(A_ , A_ )
def __str__( self ) -> str:
return str([self.query(1 , 1 , self.size , A_ , A_ ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
_A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
_A = 15
_A = SegmentTree(size)
segt.build(1, 1, size, A)
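    # expected query results (1-indexed, inclusive ranges over A):
    # max(A[4..6]) = 7, max(A[7..11]) = 14, max(A[7..12]) = 15, and after
    # assigning 111 to positions 1..3, max(A[1..15]) = 111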
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
| 62 | 0 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
lowerCAmelCase_ = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
lowerCAmelCase_ = {
'ctrl': 256,
}
lowerCAmelCase_ = {
'Pregnancy': 168_629,
'Christianity': 7_675,
'Explain': 106_423,
'Fitness': 63_440,
'Saving': 63_163,
'Ask': 27_171,
'Ass': 95_985,
'Joke': 163_509,
'Questions': 45_622,
'Thoughts': 49_605,
'Retail': 52_342,
'Feminism': 164_338,
'Writing': 11_992,
'Atheism': 192_263,
'Netflix': 48_616,
'Computing': 39_639,
'Opinion': 43_213,
'Alone': 44_967,
'Funny': 58_917,
'Gaming': 40_358,
'Human': 4_088,
'India': 1_331,
'Joker': 77_138,
'Diet': 36_206,
'Legal': 11_859,
'Norman': 4_939,
'Tip': 72_689,
'Weight': 52_343,
'Movies': 46_273,
'Running': 23_425,
'Science': 2_090,
'Horror': 37_793,
'Confession': 60_572,
'Finance': 12_250,
'Politics': 16_360,
'Scary': 191_985,
'Support': 12_654,
'Technologies': 32_516,
'Teenage': 66_160,
'Event': 32_769,
'Learned': 67_460,
'Notion': 182_770,
'Wikipedia': 37_583,
'Books': 6_665,
'Extract': 76_050,
'Confessions': 102_701,
'Conspiracy': 75_932,
'Links': 63_674,
'Narcissus': 150_425,
'Relationship': 54_766,
'Relationships': 134_796,
'Reviews': 41_671,
'News': 4_256,
'Translation': 26_820,
'multilingual': 128_406,
}
def __UpperCAmelCase ( __lowerCamelCase ) -> Union[str, Any]:
lowercase__ : Dict = set()
lowercase__ : Optional[int] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase__ : Dict = char
lowercase__ : Tuple = set(__lowerCamelCase )
return pairs
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : List[str] = VOCAB_FILES_NAMES
lowerCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : Optional[Any] = CONTROL_CODES
def __init__( self : int ,_snake_case : str ,_snake_case : Tuple ,_snake_case : List[str]="<unk>" ,**_snake_case : List[str] ) -> Tuple:
"""simple docstring"""
super().__init__(unk_token=_snake_case ,**_snake_case )
with open(_snake_case ,encoding='''utf-8''' ) as vocab_handle:
lowercase__ : Dict = json.load(_snake_case )
lowercase__ : str = {v: k for k, v in self.encoder.items()}
with open(_snake_case ,encoding='''utf-8''' ) as merges_handle:
lowercase__ : Union[str, Any] = merges_handle.read().split('''\n''' )[1:-1]
lowercase__ : List[Any] = [tuple(merge.split() ) for merge in merges]
lowercase__ : List[str] = dict(zip(_snake_case ,range(len(_snake_case ) ) ) )
lowercase__ : int = {}
@property
def UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return len(self.encoder )
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return dict(self.encoder ,**self.added_tokens_encoder )
def UpperCAmelCase ( self : Any ,_snake_case : Union[str, Any] ) -> Tuple:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowercase__ : str = tuple(_snake_case )
lowercase__ : Dict = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
lowercase__ : Any = get_pairs(_snake_case )
if not pairs:
return token
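        # BPE loop: repeatedly merge the adjacent pair with the lowest merge rank
        # until no known pair remains, then mark intra-word splits with "@@ "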
while True:
lowercase__ : Dict = min(_snake_case ,key=lambda _snake_case : self.bpe_ranks.get(_snake_case ,float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase__ , lowercase__ : Any = bigram
lowercase__ : Tuple = []
lowercase__ : Any = 0
while i < len(_snake_case ):
try:
lowercase__ : Optional[Any] = word.index(_snake_case ,_snake_case )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase__ : int = j
if word[i] == first and i < len(_snake_case ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase__ : Tuple = tuple(_snake_case )
lowercase__ : int = new_word
if len(_snake_case ) == 1:
break
else:
lowercase__ : Optional[Any] = get_pairs(_snake_case )
lowercase__ : Union[str, Any] = '''@@ '''.join(_snake_case )
lowercase__ : List[Any] = word[:-4]
lowercase__ : int = word
return word
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : List[str] = []
lowercase__ : int = re.findall(r'''\S+\n?''' ,_snake_case )
for token in words:
split_tokens.extend(list(self.bpe(_snake_case ).split(''' ''' ) ) )
return split_tokens
def UpperCAmelCase ( self : List[Any] ,_snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
return self.encoder.get(_snake_case ,self.encoder.get(self.unk_token ) )
def UpperCAmelCase ( self : Optional[int] ,_snake_case : str ) -> Optional[Any]:
"""simple docstring"""
return self.decoder.get(_snake_case ,self.unk_token )
def UpperCAmelCase ( self : List[str] ,_snake_case : str ) -> Any:
"""simple docstring"""
lowercase__ : List[str] = ''' '''.join(_snake_case ).replace('''@@ ''' ,'''''' ).strip()
return out_string
def UpperCAmelCase ( self : Dict ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_snake_case ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ : int = os.path.join(
_snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__ : Dict = os.path.join(
_snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(_snake_case ,'''w''' ,encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_snake_case ,ensure_ascii=_snake_case ) + '''\n''' )
lowercase__ : Optional[int] = 0
with open(_snake_case ,'''w''' ,encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
lowercase__ : List[Any] = token_index
writer.write(''' '''.join(_snake_case ) + '''\n''' )
index += 1
return vocab_file, merge_file
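    # Usage sketch for the upstream equivalent of this class (CTRLTokenizer; the
    # file paths are hypothetical assumptions):
    #   tok = CTRLTokenizer("vocab.json", "merges.txt")
    #   tok.tokenize("my name is john")   # -> BPE pieces with "@@ " continuation markers
    #   tok.save_vocabulary("./out")      # writes vocab.json and merges.txt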
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 16 |
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str = "" ):
__UpperCamelCase =url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
__UpperCamelCase =BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE__ ).text , 'html.parser' )
__UpperCamelCase =soup.find_all('td' , attrs='titleColumn' )
__UpperCamelCase =soup.find_all('td' , class_='ratingColumn imdbRating' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str = "IMDb_Top_250_Movies.csv" ):
__UpperCamelCase =get_imdb_top_aaa_movies()
with open(SCREAMING_SNAKE_CASE__ , 'w' , newline='' ) as out_file:
__UpperCamelCase =csv.writer(SCREAMING_SNAKE_CASE__ )
writer.writerow(['Movie title', 'IMDb rating'] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 62 | 0 |
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
_a = '3'
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 17 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_A = logging.get_logger(__name__)
_A = {
'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = "instructblip_vision_model"
def __init__( self , A_=1408 , A_=6144 , A_=39 , A_=16 , A_=224 , A_=14 , A_="gelu" , A_=1E-6 , A_=0.0 , A_=1E-10 , A_=True , **A_ , ) -> Tuple:
super().__init__(**A_ )
__UpperCamelCase =hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =patch_size
__UpperCamelCase =image_size
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_dropout
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
__UpperCamelCase =qkv_bias
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__UpperCamelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "instructblip_qformer"
def __init__( self , A_=30522 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=0.02 , A_=1E-12 , A_=0 , A_="absolute" , A_=2 , A_=1408 , **A_ , ) -> Optional[Any]:
super().__init__(pad_token_id=A_ , **A_ )
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =hidden_act
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =initializer_range
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =position_embedding_type
__UpperCamelCase =cross_attention_frequency
__UpperCamelCase =encoder_hidden_size
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__UpperCamelCase =config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = "instructblip"
UpperCAmelCase__ : Optional[Any] = True
def __init__( self , A_=None , A_=None , A_=None , A_=32 , **A_ ) -> List[str]:
super().__init__(**A_ )
if vision_config is None:
__UpperCamelCase ={}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
__UpperCamelCase ={}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
__UpperCamelCase ={}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
__UpperCamelCase =InstructBlipVisionConfig(**A_ )
__UpperCamelCase =InstructBlipQFormerConfig(**A_ )
__UpperCamelCase =text_config['model_type'] if 'model_type' in text_config else 'opt'
__UpperCamelCase =CONFIG_MAPPING[text_model_type](**A_ )
__UpperCamelCase =self.text_config.tie_word_embeddings
__UpperCamelCase =self.text_config.is_encoder_decoder
__UpperCamelCase =num_query_tokens
__UpperCamelCase =self.vision_config.hidden_size
__UpperCamelCase =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__UpperCamelCase =1.0
__UpperCamelCase =0.02
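        # the two constants above correspond to initializer_factor (1.0) and
        # initializer_range (0.02) in the upstream InstructBlipConfig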
@classmethod
def _a ( cls , A_ , A_ , A_ , **A_ , ) -> Optional[Any]:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **A_ , )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =copy.deepcopy(self.__dict__ )
__UpperCamelCase =self.vision_config.to_dict()
__UpperCamelCase =self.qformer_config.to_dict()
__UpperCamelCase =self.text_config.to_dict()
__UpperCamelCase =self.__class__.model_type
return output
| 62 | 0 |
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def _snake_case ( lowerCAmelCase : List[str] ):
"""simple docstring"""
return 1 / (1 + np.exp(-z ))
def _snake_case ( lowerCAmelCase : List[Any] , lowerCAmelCase : Dict ):
"""simple docstring"""
return (-y * np.log(lowerCAmelCase ) - (1 - y) * np.log(1 - h )).mean()
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.dot(lowerCAmelCase , lowerCAmelCase )
return np.sum(y * scores - np.log(1 + np.exp(lowerCAmelCase ) ) )
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Dict=7_0_0_0_0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.zeros(x.shape[1] )
for iterations in range(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Any = np.dot(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = sigmoid_function(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = np.dot(x.T , h - y ) / y.size
SCREAMING_SNAKE_CASE_ : Optional[Any] = theta - alpha * gradient # updating the weights
SCREAMING_SNAKE_CASE_ : List[str] = np.dot(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = sigmoid_function(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = cost_function(lowerCAmelCase , lowerCAmelCase )
if iterations % 1_0_0 == 0:
print(f'loss: {j} \t' ) # printing the loss after every 100 iterations
return theta
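# Sanity-check sketch (illustrative, not in the original script): one perfectly
# separating feature drives the learned weight positive, e.g.
#   logistic_reg(0.1, np.array([[0.0], [1.0]]), np.array([0, 1]), max_iterations=1000)
# returns a one-element theta with theta[0] > 0.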
if __name__ == "__main__":
__lowerCamelCase : Optional[Any] = datasets.load_iris()
__lowerCamelCase : List[Any] = iris.data[:, :2]
__lowerCamelCase : List[str] = (iris.target != 0) * 1
__lowerCamelCase : Tuple = 0.1
__lowerCamelCase : str = logistic_reg(alpha, x, y, max_iterations=7_00_00)
print('''theta: ''', theta) # printing the theta i.e our weights vector
def _snake_case ( lowerCAmelCase : Any ):
"""simple docstring"""
return sigmoid_function(
np.dot(lowerCAmelCase , lowerCAmelCase ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
((__lowerCamelCase) , (__lowerCamelCase)) : List[Any] = (x[:, 0].min(), x[:, 0].max())
((__lowerCamelCase) , (__lowerCamelCase)) : Any = (x[:, 1].min(), x[:, 1].max())
((__lowerCamelCase) , (__lowerCamelCase)) : Optional[Any] = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
__lowerCamelCase : Optional[int] = np.c_[xxa.ravel(), xxa.ravel()]
__lowerCamelCase : Union[str, Any] = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
| 18 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_A = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
_A = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =numpy.dtype(numpy.uintaa ).newbyteorder('>' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=SCREAMING_SNAKE_CASE__ )[0]
@deprecated(SCREAMING_SNAKE_CASE__ , 'Please use tf.data to implement this functionality.' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
print('Extracting' , f.name )
with gzip.GzipFile(fileobj=SCREAMING_SNAKE_CASE__ ) as bytestream:
__UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ )
if magic != 20_51:
raise ValueError(
'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) )
__UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =bytestream.read(rows * cols * num_images )
__UpperCamelCase =numpy.frombuffer(SCREAMING_SNAKE_CASE__ , dtype=numpy.uinta )
__UpperCamelCase =data.reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
return data
@deprecated(SCREAMING_SNAKE_CASE__ , 'Please use tf.one_hot on tensors.' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
__UpperCamelCase =labels_dense.shape[0]
__UpperCamelCase =numpy.arange(SCREAMING_SNAKE_CASE__ ) * num_classes
__UpperCamelCase =numpy.zeros((num_labels, num_classes) )
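    # upstream the next line is labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1,
    # i.e. exactly one entry per row is set via flat indexing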
__UpperCamelCase =1
return labels_one_hot
@deprecated(SCREAMING_SNAKE_CASE__ , 'Please use tf.data to implement this functionality.' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : str=10 ):
print('Extracting' , f.name )
with gzip.GzipFile(fileobj=SCREAMING_SNAKE_CASE__ ) as bytestream:
__UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ )
if magic != 20_49:
raise ValueError(
'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) )
__UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =bytestream.read(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =numpy.frombuffer(SCREAMING_SNAKE_CASE__ , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return labels
class UpperCAmelCase__ :
"""simple docstring"""
@deprecated(
A_ , 'Please use alternatives such as official/mnist/_DataSet.py'
' from tensorflow/models.' , )
def __init__( self , A_ , A_ , A_=False , A_=False , A_=dtypes.floataa , A_=True , A_=None , ) -> Optional[int]:
__UpperCamelCase , __UpperCamelCase =random_seed.get_seed(A_ )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
__UpperCamelCase =dtypes.as_dtype(A_ ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype )
if fake_data:
__UpperCamelCase =10000
__UpperCamelCase =one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'images.shape: {images.shape} labels.shape: {labels.shape}'
__UpperCamelCase =images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
__UpperCamelCase =images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
__UpperCamelCase =images.astype(numpy.floataa )
__UpperCamelCase =numpy.multiply(A_ , 1.0 / 255.0 )
__UpperCamelCase =images
__UpperCamelCase =labels
__UpperCamelCase =0
__UpperCamelCase =0
@property
def _a ( self ) -> Tuple:
return self._images
@property
def _a ( self ) -> Union[str, Any]:
return self._labels
@property
def _a ( self ) -> Optional[Any]:
return self._num_examples
@property
def _a ( self ) -> List[str]:
return self._epochs_completed
def _a ( self , A_ , A_=False , A_=True ) -> Optional[Any]:
if fake_data:
__UpperCamelCase =[1] * 784
__UpperCamelCase =[1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(A_ )],
[fake_label for _ in range(A_ )],
)
__UpperCamelCase =self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
__UpperCamelCase =numpy.arange(self._num_examples )
numpy.random.shuffle(A_ )
__UpperCamelCase =self.images[perma]
__UpperCamelCase =self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
__UpperCamelCase =self._num_examples - start
__UpperCamelCase =self._images[start : self._num_examples]
__UpperCamelCase =self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
__UpperCamelCase =numpy.arange(self._num_examples )
numpy.random.shuffle(A_ )
__UpperCamelCase =self.images[perm]
__UpperCamelCase =self.labels[perm]
# Start next epoch
__UpperCamelCase =0
__UpperCamelCase =batch_size - rest_num_examples
__UpperCamelCase =self._index_in_epoch
__UpperCamelCase =self._images[start:end]
__UpperCamelCase =self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
__UpperCamelCase =self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(SCREAMING_SNAKE_CASE__ , 'Please write your own downloading logic.' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
if not gfile.Exists(SCREAMING_SNAKE_CASE__ ):
gfile.MakeDirs(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not gfile.Exists(SCREAMING_SNAKE_CASE__ ):
urllib.request.urlretrieve(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # noqa: S310
with gfile.GFile(SCREAMING_SNAKE_CASE__ ) as f:
__UpperCamelCase =f.size()
print('Successfully downloaded' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'bytes.' )
return filepath
@deprecated(
SCREAMING_SNAKE_CASE__ , 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int=False , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=dtypes.floataa , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : str=50_00 , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : str=DEFAULT_SOURCE_URL , ):
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=SCREAMING_SNAKE_CASE__ , one_hot=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ , seed=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =fake()
__UpperCamelCase =fake()
__UpperCamelCase =fake()
return _Datasets(train=SCREAMING_SNAKE_CASE__ , validation=SCREAMING_SNAKE_CASE__ , test=SCREAMING_SNAKE_CASE__ )
if not source_url: # empty string check
__UpperCamelCase =DEFAULT_SOURCE_URL
__UpperCamelCase ='train-images-idx3-ubyte.gz'
__UpperCamelCase ='train-labels-idx1-ubyte.gz'
__UpperCamelCase ='t10k-images-idx3-ubyte.gz'
__UpperCamelCase ='t10k-labels-idx1-ubyte.gz'
__UpperCamelCase =_maybe_download(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + train_images_file )
with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f:
__UpperCamelCase =_extract_images(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_maybe_download(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + train_labels_file )
with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f:
__UpperCamelCase =_extract_labels(SCREAMING_SNAKE_CASE__ , one_hot=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_maybe_download(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + test_images_file )
with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f:
__UpperCamelCase =_extract_images(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_maybe_download(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + test_labels_file )
with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f:
__UpperCamelCase =_extract_labels(SCREAMING_SNAKE_CASE__ , one_hot=SCREAMING_SNAKE_CASE__ )
if not 0 <= validation_size <= len(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =(
'Validation size should be between 0 and '
F'{len(SCREAMING_SNAKE_CASE__ )}. Received: {validation_size}.'
)
raise ValueError(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =train_images[:validation_size]
__UpperCamelCase =train_labels[:validation_size]
__UpperCamelCase =train_images[validation_size:]
__UpperCamelCase =train_labels[validation_size:]
__UpperCamelCase ={'dtype': dtype, 'reshape': reshape, 'seed': seed}
__UpperCamelCase =_DataSet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_DataSet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_DataSet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
return _Datasets(train=SCREAMING_SNAKE_CASE__ , validation=SCREAMING_SNAKE_CASE__ , test=SCREAMING_SNAKE_CASE__ )
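# Usage sketch (deprecated API; downloads the four MNIST archives on first call,
# so network access is assumed; names follow the upstream read_data_sets entry point):
#   data = read_data_sets("/tmp/mnist_data", one_hot=True)
#   images, labels = data.train.next_batch(100)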
| 62 | 0 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
lowerCAmelCase__ = MODEL_FOR_MASKED_LM_MAPPING
lowerCAmelCase__ = TF_MODEL_FOR_MASKED_LM_MAPPING
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
lowerCamelCase_ = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(lowercase , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
] , )
lowerCamelCase_ = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(lowercase , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 25506,
"token_str": " accuser",
},
] , )
lowerCamelCase_ = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(lowercase , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
lowerCamelCase_ = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(lowercase , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
lowerCamelCase_ = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(lowercase , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
lowerCamelCase_ = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(lowercase , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
] , )
lowerCamelCase_ = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(lowercase , decimals=6 ) , [
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
# convert model to fp16
pipe.model.half()
lowerCamelCase_ = pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(lowercase , lowercase )
@slow
@require_torch
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(lowercase )
@slow
@require_tf
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> int:
lowerCamelCase_ = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(lowercase ) , [
{"sequence": "My name is John", "score": 0.0_0_8, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.0_0_7, "token": 1573, "token_str": " Chris"},
] , )
lowerCamelCase_ = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(lowercase ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.2_5_1,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.2_1_4,
"token": 12790,
"token_str": " Lyon",
},
] , )
lowerCamelCase_ = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(lowercase ) , [
{"sequence": "My name is Patrick", "score": 0.0_0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.0_0_0, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.0_0_0, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
lowerCamelCase_ = None
lowerCamelCase_ = None
self.run_pipeline_test(lowercase , [] )
@require_tf
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
lowerCamelCase_ = None
lowerCamelCase_ = None
self.run_pipeline_test(lowercase , [] )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase ) -> Union[str, Any]:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
lowerCamelCase_ = FillMaskPipeline(model=lowercase , tokenizer=lowercase )
lowerCamelCase_ = [
f'This is another {tokenizer.mask_token} test',
]
return fill_masker, examples
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> Dict:
lowerCamelCase_ = fill_masker.tokenizer
lowerCamelCase_ = fill_masker.model
lowerCamelCase_ = fill_masker(
f'This is a {tokenizer.mask_token}' , )
self.assertEqual(
lowercase , [
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
] , )
lowerCamelCase_ = fill_masker([f'This is a {tokenizer.mask_token}'] )
self.assertEqual(
lowercase , [
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
] , )
lowerCamelCase_ = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'] )
self.assertEqual(
lowercase , [
[
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
],
[
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
],
] , )
with self.assertRaises(lowercase ):
fill_masker([None] )
        # inputs without a mask token are not supported
with self.assertRaises(lowercase ):
fill_masker("This is" )
self.run_test_top_k(lowercase , lowercase )
self.run_test_targets(lowercase , lowercase )
self.run_test_top_k_targets(lowercase , lowercase )
self.fill_mask_with_duplicate_targets_and_top_k(lowercase , lowercase )
self.fill_mask_with_multiple_masks(lowercase , lowercase )
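        # the five helper calls above are implemented below and exercise targets,
        # top_k, their interaction, duplicate targets, and multi-mask inputs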
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets) == set(tokens):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid targets
        with self.assertRaises(ValueError):
            fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so the pipeline cannot return more
        # results than the number of distinct targets
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
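# Note on the assertions above: ANY(type) compares equal to any instance of the given
# type, so these tests pin down the *schema* of FillMaskPipeline outputs (sequence,
# score, token, token_str) without depending on model-specific scores or vocabularies.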
| 19 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_full_tokenizer_moses(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_target = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_target)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_target), text_in)
    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
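# `move_added_token` is specific to TransfoXLTokenizer: it relocates a freshly added token
# to a chosen low id. The checks above cover the two failure modes that matter here: the
# moved token must not end up duplicated, and encode/decode must round-trip via the new id.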
| 62 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
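# Illustrative use (names taken from the rename_keys table above):
#   rename_key(state_dict, "query_embed.weight", "query_position_embeddings.weight")
# pops the tensor stored under the old key and re-inserts it under the new one, in place.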
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
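# The hard-coded 256 boundaries above assume d_model = 256: PyTorch's fused in_proj packs
# q, k and v row-wise into a (3*256, 256) matrix, so rows [0:256], [256:512] and [512:768]
# correspond to the query, key and value projections respectively.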
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
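    # The allclose checks above gate saving: logits and boxes from the converted model must
    # match the torch.hub original within 1e-4, so a bad key rename fails loudly here.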
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 20 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_convbert_fast'] = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convbert'] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convbert'] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
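# The _LazyModule indirection defers the heavy torch/TF imports until an attribute is first
# accessed, while the TYPE_CHECKING branch keeps static analyzers seeing the real symbols.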
| 62 | 0 |
def prime_sieve_eratosthenes(num: int) -> list[int]:
    if num <= 0:
        raise ValueError('Input must be a positive integer')

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
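# Quick sanity check (illustrative): prime_sieve_eratosthenes(30) returns
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]. Stopping the outer loop at p * p <= num suffices
# because every composite n <= num has a prime factor no greater than sqrt(num).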
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 21 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
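# Note: despite its name, accuracy() returns the raw count of correct predictions; the
# evaluation loop below divides the accumulated count by nb_eval_examples to get a ratio.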
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding='utf_8') as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
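# Each ROCStories CSV row holds four story sentences (columns 1-4), two candidate endings
# (columns 5-6) and a 1-based answer column; the tuple above flattens this to
# (story_text, ending_1, ending_2, zero_based_label).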
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)"""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
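# Resulting tensors per split: input_ids (n_batch, 2, input_len), mc_token_ids (n_batch, 2),
# lm_labels (n_batch, 2, input_len) and mc_labels (n_batch,); axis 1 indexes the two
# candidate continuations that the double-heads model scores against each other.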
def main():
    parser = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=SCREAMING_SNAKE_CASE__ , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=SCREAMING_SNAKE_CASE__ , default='' )
parser.add_argument('--eval_dataset' , type=SCREAMING_SNAKE_CASE__ , default='' )
parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE__ , default=42 )
parser.add_argument('--num_train_epochs' , type=SCREAMING_SNAKE_CASE__ , default=3 )
parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=8 )
parser.add_argument('--eval_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=16 )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=SCREAMING_SNAKE_CASE__ , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=SCREAMING_SNAKE_CASE__ , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=SCREAMING_SNAKE_CASE__ , help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=SCREAMING_SNAKE_CASE__ , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE__ , default=6.25E-5 )
parser.add_argument('--warmup_steps' , default=0 , type=SCREAMING_SNAKE_CASE__ , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=SCREAMING_SNAKE_CASE__ , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=SCREAMING_SNAKE_CASE__ , default=0.01 )
parser.add_argument('--lm_coef' , type=SCREAMING_SNAKE_CASE__ , default=0.9 )
parser.add_argument('--n_valid' , type=SCREAMING_SNAKE_CASE__ , default=3_74 )
parser.add_argument('--server_ip' , type=SCREAMING_SNAKE_CASE__ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=SCREAMING_SNAKE_CASE__ , default='' , help='Can be used for distant debugging.' )
    args = parser.parse_args()
    print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE__ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    n_gpu = torch.cuda.device_count()
    logger.info('device: {}, n_gpu {}'.format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ['_start_', '_delimiter_', '_classify_']
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
# Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info('Encoding dataset...')
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                'weight_decay': args.weight_decay,
            },
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc='Epoch'):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc='Training')
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = 'Training loss: {:.2e} lr: {:.2e}'.format(exp_average_loss, scheduler.get_lr()[0])
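    # The double-heads objective above mixes the language-modeling loss (losses[0], weighted
    # by --lm_coef) with the multiple-choice classification loss (losses[1]); the evaluation
    # below scores only the multiple-choice head.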
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc='Evaluating'):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to('cpu').numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}

        output_eval_file = os.path.join(args.output_dir, 'eval_results.txt')
        with open(output_eval_file, 'w') as writer:
            logger.info('***** Eval results *****')
            for key in sorted(result.keys()):
                logger.info(' %s = %s', key, str(result[key]))
                writer.write('%s = %s\n' % (key, str(result[key])))
if __name__ == "__main__":
main()
| 62 | 0 |
'''simple docstring'''
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed.")

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed.")

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
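# Design note: both heads are thin linear probes on the CLIP image embedding. Raising
# p_threshold / w_threshold trades recall for precision, and flagged images are zeroed out
# (blacked) in place rather than dropped, so batch shapes stay stable for callers.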
| 22 |
def solution(min_total: int = 10**12) -> int:
    prev_numerator = 1
    numerator = 0
    prev_denominator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2
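# This looks like a Pell-style recurrence (an educated guess from the 10**12 default and
# the (denominator + 1) // 2 return): successive numerator/denominator pairs converge on
# sqrt(2), the classic route to Project Euler problem 100 style disc arrangements.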
if __name__ == "__main__":
print(f"""{solution() = }""")
| 62 | 0 |
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)
    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)
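    # The method above builds a (batch, 1, query_len, key_len) mask as the outer product of
    # the query and key padding masks, so a position may attend only where both are valid.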
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype)

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype)

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states
class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear

        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
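# The 0.044715 constant and sqrt(2/pi) factor come from the tanh approximation of the
# Gaussian CDF; this "gelu_new" variant matches GPT-2/T5 rather than the exact erf-based
# form that torch.nn.GELU uses by default.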
class T5FiLMLayer(nn.Module):
    """FiLM layer: feature-wise linear modulation of x by a conditioning embedding."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
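# Minimal FiLM sanity check (illustrative; the 4x-wide conditioning embedding matches the
# d_model * 4 convention used throughout this file):
#   film = T5FiLMLayer(in_features=512, out_features=128)
#   x, emb = torch.randn(2, 10, 128), torch.randn(2, 1, 512)
#   y = film(x, emb)  # per-channel scale and shift, broadcast over the sequence -> (2, 10, 128)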
| 23 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_mae'] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vit_mae'] = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 0 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias)
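# set_param is deliberately strict: the shape asserts catch transposition mistakes before a
# Trax weight overwrites a PyTorch parameter (note the .transpose(...) calls at the call sites below).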
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm, torch.tensor(layer_norm_1_weight), torch.tensor(layer_norm_1_bias), )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weights
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_2_weight), torch.tensor(layer_norm_2_bias), )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense, torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(), torch.tensor(inter_dense_bias), )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense, torch.tensor(out_dense_weight).transpose(0, 1).contiguous(), torch.tensor(out_dense_bias), )
def lowerCamelCase__ ( snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : Union[str, Any] ) -> str:
# reformer model
__snake_case = torch_model.reformer
# word embeds
__snake_case = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(snake_case_ ) , )
if isinstance(weights[3] , snake_case_ ):
__snake_case = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
__snake_case = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f"""{position_embeddings[emb_idx]} emb does not match"""
__snake_case = nn.Parameter(torch.tensor(snake_case_ ) )
__snake_case = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
snake_case_ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
__snake_case = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(snake_case_ , snake_case_ , snake_case_ )
# output layer norm
__snake_case = np.asarray(weights[7][0] )
__snake_case = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(snake_case_ ) , torch.tensor(snake_case_ ) , )
# output embeddings
__snake_case = np.asarray(weights[9][0] )
__snake_case = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(snake_case_ ).transpose(0 , 1 ).contiguous() , torch.tensor(snake_case_ ) , )
def lowerCamelCase__ ( snake_case_ : List[str] , snake_case_ : Optional[Any] , snake_case_ : Tuple ) -> Tuple:
# Initialise PyTorch model
__snake_case = ReformerConfig.from_json_file(snake_case_ )
print(f"""Building PyTorch model from configuration: {config}""" )
__snake_case = ReformerModelWithLMHead(snake_case_ )
with open(snake_case_ , '''rb''' ) as f:
__snake_case = pickle.load(snake_case_ )['''weights''']
set_model_weights_in_torch(snake_case_ , snake_case_ , config.hidden_size )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , snake_case_ )
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
snake_case_ = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
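# Example invocation (a sketch; the three paths below are hypothetical
# placeholders for a locally trained trax checkpoint and its config file):
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./reformer_trax.pkl \
#       --config_file ./reformer_config.json \
#       --pytorch_dump_path ./pytorch_model.bin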
| 24 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
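# Usage sketch for the lazy-import structure above (assumes transformers'
# _LazyModule semantics): the names registered in `_import_structure` are
# resolved on first attribute access, so the torch-dependent modeling code is
# only imported when actually needed, e.g.:
#
#   from transformers import JukeboxConfig  # cheap, config-only import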
| 62 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@slow
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" )
SCREAMING_SNAKE_CASE__ : int = AutoTokenizer.from_pretrained("""google/mt5-small""" )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids
SCREAMING_SNAKE_CASE__ : List[Any] = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ).loss
SCREAMING_SNAKE_CASE__ : List[Any] = -tf.math.reduce_mean(SCREAMING_SNAKE_CASE__ ).numpy()
SCREAMING_SNAKE_CASE__ : List[str] = -21.228168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 25 |
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}


if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 62 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400,
        do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True,
        do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
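# Minimal usage sketch for the processor exercised above (sizes follow the
# tester defaults; per the shape assertions in the tests, the output is
# padded/resized to the configured height and width):
#
#   from PIL import Image
#   from transformers import DonutImageProcessor
#
#   processor = DonutImageProcessor(size={"height": 18, "width": 20})
#   pixel_values = processor(Image.new("RGB", (64, 32)), return_tensors="pt").pixel_values
#   # pixel_values.shape == (1, 3, 18, 20)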
| 26 |
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for a single input sample."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the frequency (magnitude) response of a filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the phase response of a filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
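# Minimal demo (a sketch): an identity "filter" satisfying the FilterType
# protocol, plotted at a hypothetical 48 kHz sample rate. Its impulse response
# is a unit impulse, so the magnitude plot is flat at 0 dB and the phase is 0.
if __name__ == "__main__":

    class AllPassFilter:
        def process(self, sample: float) -> float:
            return sample

    show_frequency_response(AllPassFilter(), 48_000)
    show_phase_response(AllPassFilter(), 48_000)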
| 62 | 0 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch

from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search

from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS


logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks


class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
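# Outside the sys.argv-patching harness above, the equivalent direct
# invocation would look like this (a sketch; the file paths are hypothetical):
#
#   python run_eval.py sshleifer/bart-tiny-random utest_input.source utest_output.txt \
#       --score_path scores.json --task summarization --num_beams 2 --length_penalty 2.0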
| 27 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 62 | 0 |
from typing import List

import jiwer
import jiwer.transforms as tr
from packaging import version

import datasets
from datasets.config import PY_VERSION


if PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


SENTENCE_DELIMITER = ""


if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )


_CITATION = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"

_DESCRIPTION = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"

_KWARGS_DESCRIPTION = "\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
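# Usage sketch (mirrors the example embedded in _KWARGS_DESCRIPTION above):
#
#   import datasets
#
#   cer = datasets.load_metric("cer")
#   score = cer.compute(
#       predictions=["this is the prediction", "there is an other sample"],
#       references=["this is the reference", "there is another one"],
#   )
#   # score == 0.34146341463414637 per the docstring example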
| 28 |
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096,
        encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024,
        dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0,
        scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False,
        prompt_length=100, prompt_mid_dim=800, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
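# Usage sketch: instantiating the default configuration defined above.
# `num_attention_heads` resolves to `encoder_attention_heads` through
# `attribute_map`, so both asserts follow from the defaults:
#
#   config = MvpConfig()
#   assert config.d_model == 1024
#   assert config.num_attention_heads == 16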
| 62 | 0 |
from typing import List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n        ```py\n        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n        >>> from diffusers.utils import load_image\n        >>> import torch\n\n        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n        ... )\n        >>> pipe_prior.to("cuda")\n\n        >>> prompt = "A red cartoon frog, 4k"\n        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n        ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n        ... )\n        >>> pipe.to("cuda")\n\n        >>> init_image = load_image(\n        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n        ...     "/kandinsky/frog.png"\n        ... )\n\n        >>> image = pipe(\n        ...     image=init_image,\n        ...     image_embeds=image_emb,\n        ...     negative_image_embeds=zero_image_emb,\n        ...     height=768,\n        ...     width=768,\n        ...     num_inference_steps=100,\n        ...     strength=0.2,\n        ... ).images\n\n        >>> image[0].save("red_frog.png")\n        ```\n'


def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image


class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds, image, negative_image_embeds,
        height=512, width=512, num_inference_steps=100, guidance_scale=4.0, strength=0.3,
        num_images_per_prompt=1, generator=None, output_type="pil", return_dict=True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
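# Arithmetic sanity check for `downscale_height_and_width` above (pure math,
# no models needed): with the default scale_factor of 8 it maps pixel sizes to
# latent sizes, e.g. downscale_height_and_width(768, 768) == (96, 96), and a
# size that is not a multiple of 64 rounds up:
# downscale_height_and_width(500, 500) == (64, 64).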
| 29 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def _a ( self ) -> int:
if not self.test_rust_tokenizer:
return
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =self.get_rust_tokenizer(add_prefix_space=A_ )
__UpperCamelCase ='lower newer'
# Testing tokenization
__UpperCamelCase =tokenizer.tokenize(A_ , add_prefix_space=A_ )
__UpperCamelCase =rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
# Testing conversion to ids without special tokens
__UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ , add_prefix_space=A_ )
__UpperCamelCase =rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
# Testing conversion to ids with special tokens
__UpperCamelCase =self.get_rust_tokenizer(add_prefix_space=A_ )
__UpperCamelCase =tokenizer.encode(A_ , add_prefix_space=A_ )
__UpperCamelCase =rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
# Testing the unknown token
__UpperCamelCase =tokens + [rust_tokenizer.unk_token]
__UpperCamelCase =[14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A_ ) , A_ )
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
def _a ( self , A_=15 ) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
# Simple input
__UpperCamelCase ='This is a simple input'
__UpperCamelCase =['This is a simple input 1', 'This is a simple input 2']
__UpperCamelCase =('This is a simple input', 'This is a pair')
__UpperCamelCase =[
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='max_length' )
# Simple input
self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='max_length' )
# Simple input
self.assertRaises(
A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='max_length' , )
# Pair input
self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='max_length' )
# Pair input
self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='max_length' )
# Pair input
self.assertRaises(
A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='max_length' , )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")
# Simple input
__UpperCamelCase ='This is a simple input'
__UpperCamelCase =['This is a simple input looooooooong', 'This is a simple input']
__UpperCamelCase =('This is a simple input', 'This is a pair')
__UpperCamelCase =[
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
__UpperCamelCase =tokenizer.pad_token_id
__UpperCamelCase =tokenizer(A_ , padding='max_length' , max_length=30 , return_tensors='np' )
__UpperCamelCase =tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors='np' )
__UpperCamelCase =tokenizer(*A_ , padding='max_length' , max_length=60 , return_tensors='np' )
__UpperCamelCase =tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)
__UpperCamelCase ='This is a simple input'
__UpperCamelCase =['This is a simple input 1', 'This is a simple input 2']
__UpperCamelCase =tokenizer.bos_token_id
__UpperCamelCase =tokenizer(A_ )
__UpperCamelCase =tokenizer(A_ )
self.assertEqual(out_s.input_ids[0] , A_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__UpperCamelCase =tokenizer.decode(out_s.input_ids )
__UpperCamelCase =tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , A_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def _a ( self ) -> Optional[int]:
pass
def _a ( self ) -> Any:
# TODO: change to self.get_tokenizers() when the fast version is implemented
__UpperCamelCase =[self.get_tokenizer(do_lower_case=A_ , add_bos_token=A_ )]
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__UpperCamelCase ='Encode this.'
__UpperCamelCase ='This one too please.'
__UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ )
encoded_sequence += tokenizer.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode_plus(
A_ , A_ , add_special_tokens=A_ , return_special_tokens_mask=A_ , )
__UpperCamelCase =encoded_sequence_dict['input_ids']
__UpperCamelCase =encoded_sequence_dict['special_tokens_mask']
self.assertEqual(len(A_ ) , len(A_ ) )
__UpperCamelCase =[
(x if not special_tokens_mask[i] else None) for i, x in enumerate(A_ )
]
__UpperCamelCase =[x for x in filtered_sequence if x is not None]
self.assertEqual(A_ , A_ )
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt_tokenizer(self):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
__UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=A_ )
__UpperCamelCase ='A photo of a cat'
__UpperCamelCase =tokenizer.encode(
A_ , )
self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('test_opt' )
__UpperCamelCase =AutoTokenizer.from_pretrained('./test_opt' )
__UpperCamelCase =tokenizer.encode(
A_ , )
self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] )
    def test_fast_slow_equivalence(self):
__UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=A_ )
__UpperCamelCase ='A photo of a cat'
__UpperCamelCase =tokenizer.encode(
A_ , )
# Same as above
self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip('This test is failing because of a bug in the fast tokenizer' )
    def test_users_can_modify_bos(self):
__UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=A_ )
__UpperCamelCase ='bos'
__UpperCamelCase =tokenizer.get_vocab()['bos']
__UpperCamelCase ='A photo of a cat'
__UpperCamelCase =tokenizer.encode(
A_ , )
# We changed the bos token
self.assertEqual(A_ , [31957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('./tok' )
__UpperCamelCase =AutoTokenizer.from_pretrained('./tok' )
self.assertTrue(tokenizer.is_fast )
__UpperCamelCase =tokenizer.encode(
A_ , )
self.assertEqual(A_ , [31957, 250, 1345, 9, 10, 4758] )
| 62 | 0 |
def solution(power: int = 1_000) -> int:
    """Return the sum of the digits of the number 2**power."""
    num = 2**power
    r = 0
    while num:
        r, num = r + num % 10, num // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
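# Sanity check (pure arithmetic): 2**15 == 32768 and 3 + 2 + 7 + 6 + 8 == 26,
# so solution(15) == 26. The default solution(1000) answers Project Euler
# problem 16 (power digit sum).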
| 30 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A pure-Python vector supporting the basic linear-algebra operations."""

    def __init__(self, components=None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    # returns a zero vector of the given dimension
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    # returns a unit basis vector with a 1 at index `pos` (indexing at 0)
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    # computes the axpy operation: x * scalar + y
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    # returns a random vector of size n with integer components between a and b
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """A simple matrix class stored as a list of row lists."""

    def __init__(self, matrix, w, h) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans
def __add__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] + other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
raise Exception('matrix must have the same dimension!' )
def __sub__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] - other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self , A_ ) -> Matrix:
...
@overload
def __mul__( self , A_ ) -> Vector:
...
def __mul__( self , A_ ) -> Vector | Matrix:
if isinstance(A_ , A_ ): # matrix-vector
if len(A_ ) == self.__width:
__UpperCamelCase =zero_vector(self.__height )
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] * other.component(A_ )
for j in range(self.__width )
]
ans.change_component(A_ , sum(A_ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(A_ , (int, float) ): # matrix-scalar
__UpperCamelCase =[
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(A_ , self.__width , self.__height )
return None
def _a ( self ) -> int:
return self.__height
def _a ( self ) -> int:
return self.__width
def _a ( self , A_ , A_ ) -> float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('change_component: indices out of bounds' )
def _a ( self , A_ , A_ , A_ ) -> None:
if 0 <= x < self.__height and 0 <= y < self.__width:
__UpperCamelCase =value
else:
raise Exception('change_component: indices out of bounds' )
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
__UpperCamelCase =self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(A_ ) ):
__UpperCamelCase =minor[i][:y] + minor[i][y + 1 :]
return Matrix(A_ , self.__width - 1 , self.__height - 1 ).determinant()
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(A_ , A_ )
else:
raise Exception('Indices out of bounds' )
def _a ( self ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__UpperCamelCase =[
self.__matrix[0][y] * self.cofactor(0 , A_ ) for y in range(self.__width )
]
return sum(A_ )
def square_zero_matrix(n: int) -> Matrix:
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
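# Usage sketch for the classes above (only exercises the constructors,
# operators and string representations defined in this file):
if __name__ == "__main__":
    v = Vector([1, 2, 3])
    w = Vector([4, 5, 6])
    print(v + w)  # (5,7,9)
    print(v * w)  # 32, the dot product
    print(zero_vector(3))  # (0,0,0)
    print(square_zero_matrix(2))  # two lines of |0,0|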
| 62 | 0 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
    def test_small_model_pt(self):
_UpperCAmelCase : Union[str, Any] = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="pt" )
# Using `do_sample=False` to force deterministic output
_UpperCAmelCase : Optional[int] = text_generator("This is a test" , do_sample=A )
self.assertEqual(
A , [
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
] , )
_UpperCAmelCase : Dict = text_generator(["This is a test", "This is a second test"] )
self.assertEqual(
A , [
[
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
],
[
{
"generated_text": (
"This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
" oscope. oscope. FiliFili@@"
)
}
],
] , )
_UpperCAmelCase : Any = text_generator("This is a test" , do_sample=A , num_return_sequences=2 , return_tensors=A )
self.assertEqual(
A , [
{"generated_token_ids": ANY(A )},
{"generated_token_ids": ANY(A )},
] , )
_UpperCAmelCase : Any = text_generator.model.config.eos_token_id
_UpperCAmelCase : List[Any] = "<pad>"
_UpperCAmelCase : Dict = text_generator(
["This is a test", "This is a second test"] , do_sample=A , num_return_sequences=2 , batch_size=2 , return_tensors=A , )
self.assertEqual(
A , [
[
{"generated_token_ids": ANY(A )},
{"generated_token_ids": ANY(A )},
],
[
{"generated_token_ids": ANY(A )},
{"generated_token_ids": ANY(A )},
],
] , )
@require_tf
    def test_small_model_tf(self):
_UpperCAmelCase : int = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="tf" )
# Using `do_sample=False` to force deterministic output
_UpperCAmelCase : int = text_generator("This is a test" , do_sample=A )
self.assertEqual(
A , [
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
] , )
_UpperCAmelCase : List[str] = text_generator(["This is a test", "This is a second test"] , do_sample=A )
self.assertEqual(
A , [
[
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
],
[
{
"generated_text": (
"This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
" Cannes 閲閲Cannes Cannes Cannes 攵 please,"
)
}
],
] , )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self):
        prompt = "Hello I believe in"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
def _A ( self : Optional[int] , A : Optional[Any] , A : List[Any] ):
_UpperCAmelCase : List[str] = text_generator.model
_UpperCAmelCase : str = text_generator.tokenizer
_UpperCAmelCase : Dict = text_generator("This is a test" )
self.assertEqual(A , [{"generated_text": ANY(A )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
_UpperCAmelCase : Dict = text_generator("This is a test" , return_full_text=A )
self.assertEqual(A , [{"generated_text": ANY(A )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
_UpperCAmelCase : int = pipeline(task="text-generation" , model=A , tokenizer=A , return_full_text=A )
_UpperCAmelCase : Optional[Any] = text_generator("This is a test" )
self.assertEqual(A , [{"generated_text": ANY(A )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
_UpperCAmelCase : str = text_generator("This is a test" , return_full_text=A )
self.assertEqual(A , [{"generated_text": ANY(A )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
_UpperCAmelCase : Dict = text_generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=A )
self.assertEqual(
A , [
[{"generated_text": ANY(A )}, {"generated_text": ANY(A )}],
[{"generated_text": ANY(A )}, {"generated_text": ANY(A )}],
] , )
if text_generator.tokenizer.pad_token is not None:
_UpperCAmelCase : Optional[Any] = text_generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=A )
self.assertEqual(
A , [
[{"generated_text": ANY(A )}, {"generated_text": ANY(A )}],
[{"generated_text": ANY(A )}, {"generated_text": ANY(A )}],
] , )
with self.assertRaises(A ):
_UpperCAmelCase : List[str] = text_generator("test" , return_full_text=A , return_text=A )
with self.assertRaises(A ):
_UpperCAmelCase : Tuple = text_generator("test" , return_full_text=A , return_tensors=A )
with self.assertRaises(A ):
_UpperCAmelCase : List[str] = text_generator("test" , return_text=A , return_tensors=A )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
_UpperCAmelCase : Any = text_generator("" )
self.assertEqual(A , [{"generated_text": ANY(A )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
_UpperCAmelCase : Dict = text_generator("" )
if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and controlling long
            # generation with only max_length would require fancy calculation,
            # so these tests are skipped for now.
return
        # Models with an effectively infinite input range need no special handling here; they already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
_UpperCAmelCase : Any = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("This is a test" * 500 , max_new_tokens=20 )
_UpperCAmelCase : Any = text_generator("This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(A ):
text_generator(
"This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def _A ( self : str ):
import torch
# Classic `model_kwargs`
_UpperCAmelCase : Any = pipeline(
model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_UpperCAmelCase : List[str] = pipe("This is a test" )
self.assertEqual(
A , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
        # Upgraded those two to real pipeline arguments (they are simply forwarded to the model, as they're unlikely to mean anything else.)
_UpperCAmelCase : Tuple = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_UpperCAmelCase : List[Any] = pipe("This is a test" )
self.assertEqual(
A , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
_UpperCAmelCase : Tuple = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
_UpperCAmelCase : Optional[Any] = pipe("This is a test" )
self.assertEqual(
A , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
@require_torch
@require_torch_gpu
def _A ( self : Tuple ):
import torch
_UpperCAmelCase : Optional[int] = pipeline(model="hf-internal-testing/tiny-random-bloom" , device=0 , torch_dtype=torch.floataa )
pipe("This is a test" )
@require_torch
@require_accelerate
@require_torch_gpu
def _A ( self : Optional[Any] ):
import torch
_UpperCAmelCase : int = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.floataa )
pipe("This is a test" , do_sample=A , top_p=0.5 )
def _A ( self : str ):
_UpperCAmelCase : Any = "Hello world"
_UpperCAmelCase : Any = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
if text_generator.model.framework == "tf":
_UpperCAmelCase : Tuple = logging.get_logger("transformers.generation.tf_utils" )
else:
_UpperCAmelCase : Optional[Any] = logging.get_logger("transformers.generation.utils" )
_UpperCAmelCase : int = "Both `max_new_tokens`" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(A ) as cl:
_UpperCAmelCase : int = text_generator(A , max_length=10 , max_new_tokens=1 )
self.assertIn(A , cl.out )
# The user only sets one -> no warning
with CaptureLogger(A ) as cl:
_UpperCAmelCase : Tuple = text_generator(A , max_new_tokens=1 )
self.assertNotIn(A , cl.out )
with CaptureLogger(A ) as cl:
_UpperCAmelCase : List[str] = text_generator(A , max_length=10 )
self.assertNotIn(A , cl.out )
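# Hedged usage sketch of the pipeline API exercised by the tests above (model
# id and prompt are illustrative; do_sample=False forces greedy decoding, which
# is what makes the expected strings in the tests deterministic):
from transformers import pipeline
generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
print(generator("Hello I believe in", do_sample=False, max_new_tokens=5)[0]["generated_text"])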
| 31 |
_A = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
_A = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : dict[int, list[int]] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[bool] ):
__UpperCamelCase =True
__UpperCamelCase =[]
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
order.append(SCREAMING_SNAKE_CASE__ )
return order
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : dict[int, list[int]] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[bool] ):
__UpperCamelCase =True
__UpperCamelCase =[vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return component
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : dict[int, list[int]] ):
__UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) * [False]
__UpperCamelCase ={vert: [] for vert in range(len(SCREAMING_SNAKE_CASE__ ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[]
for i, was_visited in enumerate(SCREAMING_SNAKE_CASE__ ):
if not was_visited:
order += topology_sort(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[]
__UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) * [False]
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
__UpperCamelCase =order[len(SCREAMING_SNAKE_CASE__ ) - i - 1]
if not visited[vert]:
__UpperCamelCase =find_components(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
components_list.append(SCREAMING_SNAKE_CASE__ )
return components_list
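# Usage note for the Kosaraju routine above: the style transform collapsed the
# definition names, but the call sites suggest the helpers are topology_sort,
# find_components and a top-level strongly_connected_components (hypothetical
# names). Under that assumption, the test graphs at the top decompose as:
#   test_graph_1 -> {0, 1, 2} (the 0 -> 2 -> 1 -> 0 cycle), {3}, {4}
#   test_graph_2 -> {0, 1, 2} and {3, 4, 5} (two disjoint cycles)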
| 62 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : Tuple = '''deit'''
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str]=7_6_8 , SCREAMING_SNAKE_CASE__ : List[Any]=1_2 , SCREAMING_SNAKE_CASE__ : List[str]=1_2 , SCREAMING_SNAKE_CASE__ : Tuple=3_0_7_2 , SCREAMING_SNAKE_CASE__ : Tuple="gelu" , SCREAMING_SNAKE_CASE__ : Tuple=0.0 , SCREAMING_SNAKE_CASE__ : Tuple=0.0 , SCREAMING_SNAKE_CASE__ : Dict=0.02 , SCREAMING_SNAKE_CASE__ : Tuple=1E-12 , SCREAMING_SNAKE_CASE__ : Optional[int]=2_2_4 , SCREAMING_SNAKE_CASE__ : Tuple=1_6 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=1_6 , **SCREAMING_SNAKE_CASE__ : List[Any] , ) -> int:
super().__init__(**SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = hidden_size
a_ : Dict = num_hidden_layers
a_ : int = num_attention_heads
a_ : Optional[Any] = intermediate_size
a_ : Optional[int] = hidden_act
a_ : int = hidden_dropout_prob
a_ : Any = attention_probs_dropout_prob
a_ : List[str] = initializer_range
a_ : Optional[Any] = layer_norm_eps
a_ : str = image_size
a_ : Dict = patch_size
a_ : Union[str, Any] = num_channels
a_ : Tuple = qkv_bias
a_ : int = encoder_stride
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : Any = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE ( self : str ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> float:
return 1E-4
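# Hedged upstream sketch: in transformers this pair ships as DeiTConfig and
# DeiTOnnxConfig, and the defaults in the signature above match, e.g.:
from transformers import DeiTConfig
config = DeiTConfig()
print(config.hidden_size, config.image_size, config.patch_size)  # 768 224 16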
| 32 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
_A = '▁'
_A = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
_A = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
_A = {'vinai/bartpho-syllable': 1024}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Any = VOCAB_FILES_NAMES
UpperCAmelCase__ : int = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : str = ["input_ids", "attention_mask"]
def __init__( self , A_ , A_ , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_ = None , **A_ , ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
__UpperCamelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
__UpperCamelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
__UpperCamelCase =vocab_file
__UpperCamelCase =monolingual_vocab_file
__UpperCamelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A_ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
__UpperCamelCase ={}
__UpperCamelCase =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(A_ ) not in self.fairseq_tokens_to_ids:
__UpperCamelCase =cnt
cnt += 1
with open(A_ , 'r' , encoding='utf-8' ) as f:
for line in f.readlines():
__UpperCamelCase =line.strip().split()[0]
__UpperCamelCase =len(self.fairseq_tokens_to_ids )
if str(A_ ) not in self.fairseq_tokens_to_ids:
__UpperCamelCase =len(self.fairseq_tokens_to_ids )
__UpperCamelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Any:
__UpperCamelCase =self.__dict__.copy()
__UpperCamelCase =None
__UpperCamelCase =self.sp_model.serialized_model_proto()
return state
def __setstate__( self , A_ ) -> List[str]:
__UpperCamelCase =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase ={}
__UpperCamelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _a ( self , A_ , A_ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCamelCase =[self.cls_token_id]
__UpperCamelCase =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _a ( self , A_ , A_ = None , A_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def _a ( self , A_ , A_ = None ) -> List[int]:
__UpperCamelCase =[self.sep_token_id]
__UpperCamelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _a ( self ) -> Any:
return len(self.fairseq_ids_to_tokens )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase ={self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _a ( self , A_ ) -> List[str]:
return self.sp_model.encode(A_ , out_type=A_ )
def _a ( self , A_ ) -> str:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def _a ( self , A_ ) -> int:
return self.fairseq_ids_to_tokens[index]
def _a ( self , A_ ) -> List[Any]:
__UpperCamelCase =''.join(A_ ).replace(A_ , ' ' ).strip()
return out_string
def _a ( self , A_ , A_ = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCamelCase =os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase =os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ , 'wb' ) as fi:
__UpperCamelCase =self.sp_model.serialized_model_proto()
fi.write(A_ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
A_ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , A_ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(A_ , 'w' , encoding='utf-8' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'{str(A_ )} \n' )
return out_vocab_file, out_monolingual_vocab_file
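# Hedged usage sketch: upstream this tokenizer ships as BartphoTokenizer, and
# loading the checkpoint named in the map above fetches both
# sentencepiece.bpe.model and the monolingual dict.txt (network access assumed):
from transformers import BartphoTokenizer
tok = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
print(tok.tokenize("Chúng tôi là những nghiên cứu viên."))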
| 62 | 0 |
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
__A : List[str] = re.compile(R'''^(?P<major>\d+)''' R'''\.(?P<minor>\d+)''' R'''\.(?P<patch>\d+)$''')
@total_ordering
@dataclass
class _UpperCAmelCase :
SCREAMING_SNAKE_CASE_ : str
SCREAMING_SNAKE_CASE_ : Optional[str] = None
SCREAMING_SNAKE_CASE_ : Optional[Union[str, int]] = None
SCREAMING_SNAKE_CASE_ : Optional[Union[str, int]] = None
SCREAMING_SNAKE_CASE_ : Optional[Union[str, int]] = None
def A ( self : Optional[int] ) -> Union[str, Any]:
lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = _str_to_version_tuple(self.version_str )
def __repr__( self : int ) -> List[Any]:
return F'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'''
@property
def A ( self : Any ) -> Union[str, Any]:
return self.major, self.minor, self.patch
def A ( self : Dict , A : List[Any] ) -> Tuple:
if isinstance(A , A ):
return Version(A )
elif isinstance(A , A ):
return other
raise TypeError(F'''{other} (type {type(A )}) cannot be compared to version.''' )
def __eq__( self : Union[str, Any] , A : str ) -> List[str]:
try:
lowercase_ : Optional[int] = self._validate_operand(A )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self : int , A : str ) -> Union[str, Any]:
lowercase_ : Optional[Any] = self._validate_operand(A )
return self.tuple < other.tuple
def __hash__( self : Dict ) -> Optional[Any]:
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def A ( cls : int , A : List[Any] ) -> List[str]:
lowercase_ : Dict = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def A ( self : Optional[int] ) -> str:
return self.version_str
def lowercase ( __snake_case : Any ):
lowercase_ : int = _VERSION_REG.match(__snake_case )
if not res:
raise ValueError(F'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''' )
return tuple(int(__snake_case ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] )
def lowercase ( __snake_case : int ):
return ".".join(str(__snake_case ) for v in version_tuple )
| 33 |
from numpy import exp, pi, sqrt
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : float = 0.0 , SCREAMING_SNAKE_CASE__ : float = 1.0 ):
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
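# Worked check for the density above: at x = mu the exponent vanishes, so the
# peak equals 1 / sqrt(2 * pi * sigma**2); with mu=0, sigma=1 that is ~0.39894.
# Restated with readable names (same formula as the helper above):
from numpy import exp, pi, sqrt
def gaussian(x, mu=0.0, sigma=1.0):
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
assert abs(gaussian(0.0) - 0.3989422804) < 1e-9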
| 62 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
A =logging.get_logger(__name__) # pylint: disable=invalid-name
A ='\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def snake_case_ (_a : Dict , _a : Optional[int] , _a : Any=8 ):
UpperCAmelCase = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
UpperCAmelCase = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
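# Worked example for the rounding helper above (default scale_factor=8, so the
# divisor is 8**2 = 64): a 512-pixel side maps to a 64-unit latent side
# (512 // 64 = 8, times 8), and a 700-pixel side rounds up to 88
# (700 // 64 = 10 with a remainder, so 11 * 8).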
class _a ( __a ):
def __init__( self : List[Any] , lowercase : MultilingualCLIP , lowercase : XLMRobertaTokenizer , lowercase : UNetaDConditionModel , lowercase : Union[DDIMScheduler, DDPMScheduler] , lowercase : VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
text_encoder=lowercase , tokenizer=lowercase , unet=lowercase , scheduler=lowercase , movq=lowercase , )
UpperCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def A ( self : int , lowercase : List[Any] , lowercase : int , lowercase : Dict , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : str ):
'''simple docstring'''
if latents is None:
UpperCAmelCase = randn_tensor(lowercase , generator=lowercase , device=lowercase , dtype=lowercase )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
UpperCAmelCase = latents.to(lowercase )
UpperCAmelCase = latents * scheduler.init_noise_sigma
return latents
def A ( self : List[str] , lowercase : Union[str, Any] , lowercase : Optional[Any] , lowercase : int , lowercase : str , lowercase : Dict=None , ):
'''simple docstring'''
UpperCAmelCase = len(lowercase ) if isinstance(lowercase , lowercase ) else 1
# get prompt text embeddings
UpperCAmelCase = self.tokenizer(
lowercase , padding='''max_length''' , truncation=lowercase , max_length=77 , return_attention_mask=lowercase , add_special_tokens=lowercase , return_tensors='''pt''' , )
UpperCAmelCase = text_inputs.input_ids
UpperCAmelCase = self.tokenizer(lowercase , padding='''longest''' , return_tensors='''pt''' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(lowercase , lowercase ):
UpperCAmelCase = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
UpperCAmelCase = text_input_ids.to(lowercase )
UpperCAmelCase = text_inputs.attention_mask.to(lowercase )
UpperCAmelCase , UpperCAmelCase = self.text_encoder(
input_ids=lowercase , attention_mask=lowercase )
UpperCAmelCase = prompt_embeds.repeat_interleave(lowercase , dim=0 )
UpperCAmelCase = text_encoder_hidden_states.repeat_interleave(lowercase , dim=0 )
UpperCAmelCase = text_mask.repeat_interleave(lowercase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase = 42
if negative_prompt is None:
UpperCAmelCase = [''''''] * batch_size
elif type(lowercase ) is not type(lowercase ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(lowercase )} !="
f" {type(lowercase )}." )
elif isinstance(lowercase , lowercase ):
UpperCAmelCase = [negative_prompt]
elif batch_size != len(lowercase ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(lowercase )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
''' the batch size of `prompt`.''' )
else:
UpperCAmelCase = negative_prompt
UpperCAmelCase = self.tokenizer(
lowercase , padding='''max_length''' , max_length=77 , truncation=lowercase , return_attention_mask=lowercase , add_special_tokens=lowercase , return_tensors='''pt''' , )
UpperCAmelCase = uncond_input.input_ids.to(lowercase )
UpperCAmelCase = uncond_input.attention_mask.to(lowercase )
UpperCAmelCase , UpperCAmelCase = self.text_encoder(
input_ids=lowercase , attention_mask=lowercase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase = negative_prompt_embeds.shape[1]
UpperCAmelCase = negative_prompt_embeds.repeat(1 , lowercase )
UpperCAmelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , lowercase )
UpperCAmelCase = uncond_text_encoder_hidden_states.shape[1]
UpperCAmelCase = uncond_text_encoder_hidden_states.repeat(1 , lowercase , 1 )
UpperCAmelCase = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , lowercase , -1 )
UpperCAmelCase = uncond_text_mask.repeat_interleave(lowercase , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
UpperCAmelCase = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
UpperCAmelCase = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def A ( self : Any , lowercase : str=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
UpperCAmelCase = torch.device(f"cuda:{gpu_id}" )
UpperCAmelCase = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase , lowercase )
def A ( self : str , lowercase : str=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
UpperCAmelCase = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=lowercase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
UpperCAmelCase , UpperCAmelCase = cpu_offload_with_hook(lowercase , lowercase , prev_module_hook=lowercase )
if self.safety_checker is not None:
UpperCAmelCase , UpperCAmelCase = cpu_offload_with_hook(self.safety_checker , lowercase , prev_module_hook=lowercase )
# We'll offload the last model manually.
UpperCAmelCase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def A ( self : str ):
'''simple docstring'''
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase )
def __call__( self : Optional[Any] , lowercase : Union[str, List[str]] , lowercase : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase : Optional[Union[str, List[str]]] = None , lowercase : int = 512 , lowercase : int = 512 , lowercase : int = 100 , lowercase : float = 4.0 , lowercase : int = 1 , lowercase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase : Optional[torch.FloatTensor] = None , lowercase : Optional[str] = "pil" , lowercase : bool = True , ):
'''simple docstring'''
if isinstance(lowercase , lowercase ):
UpperCAmelCase = 1
elif isinstance(lowercase , lowercase ):
UpperCAmelCase = len(lowercase )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(lowercase )}" )
UpperCAmelCase = self._execution_device
UpperCAmelCase = batch_size * num_images_per_prompt
UpperCAmelCase = guidance_scale > 1.0
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._encode_prompt(
lowercase , lowercase , lowercase , lowercase , lowercase )
if isinstance(lowercase , lowercase ):
UpperCAmelCase = torch.cat(lowercase , dim=0 )
if isinstance(lowercase , lowercase ):
UpperCAmelCase = torch.cat(lowercase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase = image_embeds.repeat_interleave(lowercase , dim=0 )
UpperCAmelCase = negative_image_embeds.repeat_interleave(lowercase , dim=0 )
UpperCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=lowercase )
self.scheduler.set_timesteps(lowercase , device=lowercase )
UpperCAmelCase = self.scheduler.timesteps
UpperCAmelCase = self.unet.config.in_channels
UpperCAmelCase , UpperCAmelCase = get_new_h_w(lowercase , lowercase , self.movq_scale_factor )
# create initial latent
UpperCAmelCase = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , lowercase , lowercase , lowercase , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase = {'''text_embeds''': prompt_embeds, '''image_embeds''': image_embeds}
UpperCAmelCase = self.unet(
sample=lowercase , timestep=lowercase , encoder_hidden_states=lowercase , added_cond_kwargs=lowercase , return_dict=lowercase , )[0]
if do_classifier_free_guidance:
UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase , UpperCAmelCase = noise_pred.chunk(2 )
UpperCAmelCase , UpperCAmelCase = variance_pred.chunk(2 )
UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase = self.scheduler.step(
lowercase , lowercase , lowercase , generator=lowercase , ).prev_sample
# post-processing
UpperCAmelCase = self.movq.decode(lowercase , force_not_quantize=lowercase )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
UpperCAmelCase = image * 0.5 + 0.5
UpperCAmelCase = image.clamp(0 , 1 )
UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase = self.numpy_to_pil(lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase )
| 34 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_A = logging.get_logger(__name__)
if is_vision_available():
import PIL
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = ["pixel_values"]
def __init__( self , A_ = True , A_ = None , A_ = PILImageResampling.BICUBIC , A_ = True , A_ = None , A_ = True , A_ = 1 / 255 , A_ = True , A_ = None , A_ = None , A_ = True , **A_ , ) -> None:
super().__init__(**A_ )
__UpperCamelCase =size if size is not None else {'shortest_edge': 224}
__UpperCamelCase =get_size_dict(A_ , default_to_square=A_ )
__UpperCamelCase =crop_size if crop_size is not None else {'height': 224, 'width': 224}
__UpperCamelCase =get_size_dict(A_ , default_to_square=A_ , param_name='crop_size' )
__UpperCamelCase =do_resize
__UpperCamelCase =size
__UpperCamelCase =resample
__UpperCamelCase =do_center_crop
__UpperCamelCase =crop_size
__UpperCamelCase =do_rescale
__UpperCamelCase =rescale_factor
__UpperCamelCase =do_normalize
__UpperCamelCase =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__UpperCamelCase =image_std if image_std is not None else OPENAI_CLIP_STD
__UpperCamelCase =do_convert_rgb
def _a ( self , A_ , A_ , A_ = PILImageResampling.BICUBIC , A_ = None , **A_ , ) -> np.ndarray:
__UpperCamelCase =get_size_dict(A_ , default_to_square=A_ )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
__UpperCamelCase =get_resize_output_image_size(A_ , size=size['shortest_edge'] , default_to_square=A_ )
return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ )
def _a ( self , A_ , A_ , A_ = None , **A_ , ) -> np.ndarray:
__UpperCamelCase =get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(A_ , size=(size['height'], size['width']) , data_format=A_ , **A_ )
def _a ( self , A_ , A_ , A_ = None , **A_ , ) -> Union[str, Any]:
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def _a ( self , A_ , A_ , A_ , A_ = None , **A_ , ) -> np.ndarray:
return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )
def _a ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ) -> PIL.Image.Image:
__UpperCamelCase =do_resize if do_resize is not None else self.do_resize
__UpperCamelCase =size if size is not None else self.size
__UpperCamelCase =get_size_dict(A_ , param_name='size' , default_to_square=A_ )
__UpperCamelCase =resample if resample is not None else self.resample
__UpperCamelCase =do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCamelCase =crop_size if crop_size is not None else self.crop_size
__UpperCamelCase =get_size_dict(A_ , param_name='crop_size' , default_to_square=A_ )
__UpperCamelCase =do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase =rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase =do_normalize if do_normalize is not None else self.do_normalize
__UpperCamelCase =image_mean if image_mean is not None else self.image_mean
__UpperCamelCase =image_std if image_std is not None else self.image_std
__UpperCamelCase =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__UpperCamelCase =make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__UpperCamelCase =[convert_to_rgb(A_ ) for image in images]
# All transformations expect numpy arrays.
__UpperCamelCase =[to_numpy_array(A_ ) for image in images]
if do_resize:
__UpperCamelCase =[self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
if do_center_crop:
__UpperCamelCase =[self.center_crop(image=A_ , size=A_ ) for image in images]
if do_rescale:
__UpperCamelCase =[self.rescale(image=A_ , scale=A_ ) for image in images]
if do_normalize:
__UpperCamelCase =[self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images]
__UpperCamelCase =[to_channel_dimension_format(A_ , A_ ) for image in images]
__UpperCamelCase ={'pixel_values': images}
return BatchFeature(data=A_ , tensor_type=A_ )
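# Hedged upstream sketch: this processor matches CLIPImageProcessor (resize the
# shortest edge to 224, center-crop, rescale, normalize), so a batch comes out
# at a fixed size:
import numpy as np
from PIL import Image
from transformers import CLIPImageProcessor
processor = CLIPImageProcessor()
image = Image.fromarray(np.zeros((256, 320, 3), dtype=np.uint8))
print(processor(images=image, return_tensors="np")["pixel_values"].shape)  # (1, 3, 224, 224)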
| 62 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a = {
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = "yolos"
def __init__( self , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.0 , A_=0.0 , A_=0.02 , A_=1E-12 , A_=[512, 864] , A_=16 , A_=3 , A_=True , A_=100 , A_=True , A_=False , A_=1 , A_=5 , A_=2 , A_=5 , A_=2 , A_=0.1 , **A_ , ) -> Any:
super().__init__(**A_ )
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =initializer_range
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =image_size
__UpperCamelCase =patch_size
__UpperCamelCase =num_channels
__UpperCamelCase =qkv_bias
__UpperCamelCase =num_detection_tokens
__UpperCamelCase =use_mid_position_embeddings
__UpperCamelCase =auxiliary_loss
# Hungarian matcher
__UpperCamelCase =class_cost
__UpperCamelCase =bbox_cost
__UpperCamelCase =giou_cost
# Loss coefficients
__UpperCamelCase =bbox_loss_coefficient
__UpperCamelCase =giou_loss_coefficient
__UpperCamelCase =eos_coefficient
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : str = version.parse("1.11" )
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _a ( self ) -> float:
return 1E-4
@property
def _a ( self ) -> int:
return 12
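# Hedged upstream sketch: this is YolosConfig; beyond the ViT-style fields it
# carries the DETR-style Hungarian-matcher and loss knobs set above, e.g.:
from transformers import YolosConfig
config = YolosConfig()
print(config.image_size, config.num_detection_tokens)  # [512, 864] 100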
| 62 | 0 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_snake_case = ["text", "image", "audio"]
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : str = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3_000 ) )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
inputs.append(create_inputs(_lowerCamelCase ) )
else:
raise ValueError(F"Invalid type requested: {input_type}" )
return inputs
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = []
for output in outputs:
if isinstance(_lowerCamelCase , (str, AgentText) ):
output_types.append("text" )
elif isinstance(_lowerCamelCase , (Image.Image, AgentImage) ):
output_types.append("image" )
elif isinstance(_lowerCamelCase , (torch.Tensor, AgentAudio) ):
output_types.append("audio" )
else:
raise ValueError(F"Invalid output: {output}" )
return output_types
@is_tool_test
class UpperCAmelCase_ :
def snake_case__ ( self):
'''simple docstring'''
self.assertTrue(hasattr(self.tool, "inputs"))
self.assertTrue(hasattr(self.tool, "outputs"))
_lowerCAmelCase : List[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input, __a):
for __input in _input:
self.assertTrue(__input in authorized_types)
else:
self.assertTrue(_input in authorized_types)
_lowerCAmelCase : str = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = create_inputs(self.tool.inputs)
_lowerCAmelCase : Dict = self.tool(*__a)
# There is a single output
if len(self.tool.outputs) == 1:
_lowerCAmelCase : Dict = [outputs]
self.assertListEqual(output_types(__a), self.tool.outputs)
def snake_case__ ( self):
'''simple docstring'''
self.assertTrue(hasattr(self.tool, "description"))
self.assertTrue(hasattr(self.tool, "default_checkpoint"))
self.assertTrue(self.tool.description.startswith("This is a tool that"))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = create_inputs(self.tool.inputs)
_lowerCAmelCase : Any = self.tool(*__a)
if not isinstance(__a, __a):
_lowerCAmelCase : str = [outputs]
self.assertEqual(len(__a), len(self.tool.outputs))
for output, output_type in zip(__a, self.tool.outputs):
_lowerCAmelCase : Any = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(__a, __a))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = create_inputs(self.tool.inputs)
_lowerCAmelCase : Tuple = []
for _input, input_type in zip(__a, self.tool.inputs):
if isinstance(__a, __a):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
# Should not raise an error
_lowerCAmelCase : Dict = self.tool(*__a)
if not isinstance(__a, __a):
_lowerCAmelCase : Any = [outputs]
self.assertEqual(len(__a), len(self.tool.outputs))
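# Hedged usage sketch: upstream this mixin is ToolTesterMixin; a concrete test
# combines it with unittest.TestCase and supplies self.tool, roughly:
#   class TranslationToolTester(ToolTesterMixin, unittest.TestCase):
#       def setUp(self):
#           self.tool = load_tool("translation")  # load_tool from transformers.tools
#           self.tool.setup()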
| 36 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_A = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
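# Note on the recurring pattern above: _LazyModule defers the heavy torch and
# vision imports until an attribute is first accessed, so for example
# `from transformers import VivitModel` only imports the modeling code at that
# point.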
| 62 | 0 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {'''vocab_file''': '''spiece.model'''}
_lowerCAmelCase = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
_lowerCAmelCase = {
'''AI-Sweden/gpt-sw3-126m''': 2048,
'''AI-Sweden/gpt-sw3-350m''': 2048,
'''AI-Sweden/gpt-sw3-1.6b''': 2048,
'''AI-Sweden/gpt-sw3-6.7b''': 2048,
'''AI-Sweden/gpt-sw3-20b''': 2048,
}
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Dict = VOCAB_FILES_NAMES
__lowercase : str = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Optional[int] = ['''input_ids''', '''attention_mask''']
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> None:
lowerCAmelCase__ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCAmelCase__ : Dict = kwargs.get("""name_or_path""" )
if name_or_path is None:
logger.warning(
"""name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
""" you are testing the model, this can safely be ignored""" )
lowerCAmelCase__ : Tuple = """None"""
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
lowerCAmelCase__ : Union[str, Any] = """<|endoftext|>""" if eos_token is None else eos_token
lowerCAmelCase__ : Dict = """<unk>""" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
lowerCAmelCase__ : Any = unk_token if pad_token is None else pad_token
lowerCAmelCase__ : Dict = eos_token if bos_token is None else bos_token
else:
lowerCAmelCase__ : List[str] = """<pad>""" if pad_token is None else pad_token
lowerCAmelCase__ : Optional[int] = """<s>""" if bos_token is None else bos_token
super().__init__(
do_lower_case=__UpperCAmelCase ,remove_space=__UpperCAmelCase ,keep_accents=__UpperCAmelCase ,bos_token=__UpperCAmelCase ,eos_token=__UpperCAmelCase ,unk_token=__UpperCAmelCase ,pad_token=__UpperCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__UpperCAmelCase ,)
lowerCAmelCase__ : Optional[int] = do_lower_case
lowerCAmelCase__ : Dict = remove_space
lowerCAmelCase__ : Optional[Any] = keep_accents
lowerCAmelCase__ : int = vocab_file
lowerCAmelCase__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
# Used for whitespace normalization in input texts
        # fmt: off
lowerCAmelCase__ : int = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
lowerCAmelCase__ : List[str] = re.compile(
F"""[{''.join(map(__UpperCAmelCase ,list(range(0 ,9 ) ) + list(range(11 ,32 ) ) + list(range(127 ,160 ) ) + [160, 173, 8203] ) )}]""" )
def __getstate__( self ) -> Any:
lowerCAmelCase__ : int = self.__dict__.copy()
lowerCAmelCase__ : Optional[int] = None
return state
def __setstate__( self ,__UpperCAmelCase ) -> List[str]:
lowerCAmelCase__ : List[str] = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
lowerCAmelCase__ : Tuple = {}
lowerCAmelCase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def UpperCAmelCase_ ( self ) -> int:
return len(self.sp_model )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str:
lowerCAmelCase__ : Tuple = self.non_printing_characters_re.sub("""""" ,__UpperCAmelCase )
# Normalize whitespaces
lowerCAmelCase__ : List[Any] = """""".join([char if char not in self.whitespaces else """ """ for char in text] )
# NFC Unicode normalization
lowerCAmelCase__ : List[Any] = unicodedata.normalize("""NFC""" ,__UpperCAmelCase )
return text
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]:
lowerCAmelCase__ : List[Any] = self.preprocess_text(__UpperCAmelCase )
return self.sp_model.encode(__UpperCAmelCase ,out_type=__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> int:
return self.sp_model.PieceToId(__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str:
return self.sp_model.IdToPiece(__UpperCAmelCase )
@staticmethod
def UpperCAmelCase_ ( __UpperCAmelCase ) -> str:
return out_string
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str:
lowerCAmelCase__ : int = []
lowerCAmelCase__ : Optional[int] = """"""
lowerCAmelCase__ : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__UpperCAmelCase ) + token
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : Optional[Any] = []
else:
current_sub_tokens.append(__UpperCAmelCase )
lowerCAmelCase__ : Any = False
out_string += self.sp_model.decode(__UpperCAmelCase )
return out_string
def UpperCAmelCase_ ( self ) -> Dict[str, int]:
lowerCAmelCase__ : Optional[int] = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase__ : Optional[int] = os.path.join(
__UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,__UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase ,"""wb""" ) as fi:
lowerCAmelCase__ : str = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCAmelCase__ : Tuple = self.preprocess_text(__UpperCAmelCase )
lowerCAmelCase__ : int = self.sp_model.encode(__UpperCAmelCase )
else:
lowerCAmelCase__ : int = [self.preprocess_text(__UpperCAmelCase ) for t in text]
lowerCAmelCase__ : Any = self.sp_model.encode(__UpperCAmelCase )
if return_tensors is True or return_tensors == "pt":
lowerCAmelCase__ : Tuple = torch.tensor(__UpperCAmelCase )
return token_ids
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str:
return self.sp_model.decode(__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[int]:
lowerCAmelCase__ : List[Any] = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
lowerCAmelCase__ : Any = (
F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(__UpperCAmelCase ) + F"""{self.bos_token}Bot:"""
)
return self.encode(text=__UpperCAmelCase )
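# Hedged usage sketch: upstream this tokenizer ships as GPTSw3Tokenizer; the
# checkpoint name is taken from the map above (network access assumed):
from transformers import GPTSw3Tokenizer
tok = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
print(tok("Hej! Hur mår du?")["input_ids"])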
| 37 |
from __future__ import annotations
import math
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ ) -> None:
__UpperCamelCase =size
# approximate the overall size of segment tree with given value
__UpperCamelCase =[0 for i in range(0 , 4 * size )]
# create array to store lazy update
__UpperCamelCase =[0 for i in range(0 , 4 * size )]
__UpperCamelCase =[0 for i in range(0 , 4 * size )] # flag for lazy update
def _a ( self , A_ ) -> int:
return idx * 2
def _a ( self , A_ ) -> int:
return idx * 2 + 1
def _a ( self , A_ , A_ , A_ , A_ ) -> None:
if left_element == right_element:
__UpperCamelCase =a[left_element - 1]
else:
__UpperCamelCase =(left_element + right_element) // 2
self.build(self.left(A_ ) , A_ , A_ , A_ )
self.build(self.right(A_ ) , mid + 1 , A_ , A_ )
__UpperCamelCase =max(
self.segment_tree[self.left(A_ )] , self.segment_tree[self.right(A_ )] )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ ) -> bool:
if self.flag[idx] is True:
__UpperCamelCase =self.lazy[idx]
__UpperCamelCase =False
if left_element != right_element:
__UpperCamelCase =self.lazy[idx]
__UpperCamelCase =self.lazy[idx]
__UpperCamelCase =True
__UpperCamelCase =True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
__UpperCamelCase =val
if left_element != right_element:
__UpperCamelCase =val
__UpperCamelCase =val
__UpperCamelCase =True
__UpperCamelCase =True
return True
__UpperCamelCase =(left_element + right_element) // 2
self.update(self.left(A_ ) , A_ , A_ , A_ , A_ , A_ )
self.update(self.right(A_ ) , mid + 1 , A_ , A_ , A_ , A_ )
__UpperCamelCase =max(
self.segment_tree[self.left(A_ )] , self.segment_tree[self.right(A_ )] )
return True
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> int | float:
if self.flag[idx] is True:
__UpperCamelCase =self.lazy[idx]
__UpperCamelCase =False
if left_element != right_element:
__UpperCamelCase =self.lazy[idx]
__UpperCamelCase =self.lazy[idx]
__UpperCamelCase =True
__UpperCamelCase =True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
__UpperCamelCase =(left_element + right_element) // 2
__UpperCamelCase =self.query(self.left(A_ ) , A_ , A_ , A_ , A_ )
__UpperCamelCase =self.query(self.right(A_ ) , mid + 1 , A_ , A_ , A_ )
return max(A_ , A_ )
def __str__( self ) -> str:
return str([self.query(1 , 1 , self.size , A_ , A_ ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
_A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
_A = 15
_A = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
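# Intended output of the demo above (the style transform collapsed the
# module-level names, so it is shown here rather than run): the three range-max
# queries print 7 (max of a[4..6] = 7, 3, -5), 14 (max of a[7..11]) and 15
# (max of a[7..12]); after assigning 111 to positions 1..3, query(1, 15)
# prints 111.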
| 62 | 0 |
from __future__ import annotations
import pandas as pd
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : list[int] , __magic_name__ : list[int] , __magic_name__ : int ) -> list[int]:
"""simple docstring"""
UpperCamelCase :List[str] = [0] * no_of_processes
UpperCamelCase :str = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(__magic_name__ ):
UpperCamelCase :Optional[int] = burst_time[i]
UpperCamelCase :str = 0
UpperCamelCase :Tuple = 0
UpperCamelCase :Union[str, Any] = 9_9999_9999
UpperCamelCase :Optional[Any] = 0
UpperCamelCase :Optional[int] = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(__magic_name__ ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
UpperCamelCase :Dict = remaining_time[j]
UpperCamelCase :Optional[Any] = j
UpperCamelCase :List[str] = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
UpperCamelCase :List[str] = remaining_time[short]
if minm == 0:
UpperCamelCase :Any = 9_9999_9999
if remaining_time[short] == 0:
complete += 1
UpperCamelCase :Dict = False
# Find finish time of current process
UpperCamelCase :Dict = increment_time + 1
# Calculate waiting time
UpperCamelCase :Union[str, Any] = finish_time - arrival_time[short]
UpperCamelCase :Optional[Any] = finar - burst_time[short]
if waiting_time[short] < 0:
UpperCamelCase :Optional[Any] = 0
# Increment time
increment_time += 1
return waiting_time
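# Hand-checked reference for the scheduler above: arrival_time=[0, 1, 2],
# burst_time=[4, 2, 1] gives waiting_time=[3, 0, 1] (P2 preempts P1 at t=1,
# P3 runs at t=3, P1 resumes last).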
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : list[int] , __magic_name__ : int , __magic_name__ : list[int] ) -> list[int]:
"""simple docstring"""
UpperCamelCase :Optional[Any] = [0] * no_of_processes
for i in range(__magic_name__ ):
UpperCamelCase :List[str] = burst_time[i] + waiting_time[i]
return turn_around_time
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : list[int] , __magic_name__ : list[int] , __magic_name__ : int ) -> None:
"""simple docstring"""
UpperCamelCase :int = 0
UpperCamelCase :str = 0
for i in range(__magic_name__ ):
UpperCamelCase :List[Any] = total_waiting_time + waiting_time[i]
UpperCamelCase :str = total_turn_around_time + turn_around_time[i]
print(f"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" )
print("""Average turn around time =""" , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print('''Enter how many process you want to analyze''')
UpperCAmelCase_ : Any = int(input())
UpperCAmelCase_ : Optional[int] = [0] * no_of_processes
UpperCAmelCase_ : Any = [0] * no_of_processes
UpperCAmelCase_ : Any = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print('''Enter the arrival time and burst time for process:--''' + str(i + 1))
UpperCAmelCase_ , UpperCAmelCase_ : Dict = map(int, input().split())
UpperCAmelCase_ : List[str] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
UpperCAmelCase_ : Tuple = burst_time
UpperCAmelCase_ : List[str] = no_of_processes
UpperCAmelCase_ : Union[str, Any] = waiting_time
UpperCAmelCase_ : Union[str, Any] = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
UpperCAmelCase_ : Tuple = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'''Process''',
'''BurstTime''',
'''ArrivalTime''',
'''WaitingTime''',
'''TurnAroundTime''',
],
)
# Print the resulting DataFrame in full (no row truncation)
pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
print(fcfs)
| 38 |
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup # bs4 is the actual package name for BeautifulSoup
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str = "" ):
__UpperCamelCase =url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
__UpperCamelCase =BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE__ ).text , 'html.parser' )
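# Each chart row keeps the title in a 'titleColumn' cell and the score in a
# 'ratingColumn imdbRating' cell; zipping the two lists pairs them up.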
__UpperCamelCase =soup.find_all('td' , attrs='titleColumn' )
__UpperCamelCase =soup.find_all('td' , class_='ratingColumn imdbRating' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str = "IMDb_Top_250_Movies.csv" ):
__UpperCamelCase =get_imdb_top_aaa_movies()
with open(SCREAMING_SNAKE_CASE__ , 'w' , newline='' ) as out_file:
__UpperCamelCase =csv.writer(SCREAMING_SNAKE_CASE__ )
writer.writerow(['Movie title', 'IMDb rating'] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 62 | 0 |
_a = {}
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> int:
"""simple docstring"""
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
_UpperCAmelCase = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
_UpperCAmelCase = _calculate(days - 1 , __lowerCAmelCase , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
_UpperCAmelCase = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
_UpperCAmelCase = _calculate(days - 1 , __lowerCAmelCase , 0 )
_UpperCAmelCase = state_late + state_absent + state_ontime
_UpperCAmelCase = prizestrings
return prizestrings
def __A ( __lowerCAmelCase = 30 )-> int:
"""simple docstring"""
return _calculate(__lowerCAmelCase , absent=0 , late=0 )
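# Sanity check: the recursion reproduces the Project Euler 191 example,
# which states that exactly 43 prize strings exist for a 4-day period.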
if __name__ == "__main__":
print(solution())
| 39 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_A = logging.get_logger(__name__)
_A = {
'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = "instructblip_vision_model"
def __init__( self , A_=1408 , A_=6144 , A_=39 , A_=16 , A_=224 , A_=14 , A_="gelu" , A_=1E-6 , A_=0.0 , A_=1E-10 , A_=True , **A_ , ) -> Tuple:
super().__init__(**A_ )
__UpperCamelCase =hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =patch_size
__UpperCamelCase =image_size
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_dropout
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
__UpperCamelCase =qkv_bias
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__UpperCamelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "instructblip_qformer"
def __init__( self , A_=30522 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=0.02 , A_=1E-12 , A_=0 , A_="absolute" , A_=2 , A_=1408 , **A_ , ) -> Optional[Any]:
super().__init__(pad_token_id=A_ , **A_ )
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =hidden_act
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =initializer_range
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =position_embedding_type
__UpperCamelCase =cross_attention_frequency
__UpperCamelCase =encoder_hidden_size
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__UpperCamelCase =config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = "instructblip"
UpperCAmelCase__ : Optional[Any] = True
def __init__( self , A_=None , A_=None , A_=None , A_=32 , **A_ ) -> List[str]:
super().__init__(**A_ )
if vision_config is None:
__UpperCamelCase ={}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
__UpperCamelCase ={}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
__UpperCamelCase ={}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
__UpperCamelCase =InstructBlipVisionConfig(**A_ )
__UpperCamelCase =InstructBlipQFormerConfig(**A_ )
__UpperCamelCase =text_config['model_type'] if 'model_type' in text_config else 'opt'
__UpperCamelCase =CONFIG_MAPPING[text_model_type](**A_ )
__UpperCamelCase =self.text_config.tie_word_embeddings
__UpperCamelCase =self.text_config.is_encoder_decoder
__UpperCamelCase =num_query_tokens
__UpperCamelCase =self.vision_config.hidden_size
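# Decoder-only text backbones (e.g. OPT) and encoder-decoder ones
# (e.g. Flan-T5) take different generation paths, so record which kind
# this config wraps.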
__UpperCamelCase =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__UpperCamelCase =1.0
__UpperCamelCase =0.02
@classmethod
def _a ( cls , A_ , A_ , A_ , **A_ , ) -> Optional[Any]:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **A_ , )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =copy.deepcopy(self.__dict__ )
__UpperCamelCase =self.vision_config.to_dict()
__UpperCamelCase =self.qformer_config.to_dict()
__UpperCamelCase =self.text_config.to_dict()
__UpperCamelCase =self.__class__.model_type
return output
| 62 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowercase = {
"""configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}
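# Backend-specific classes are only registered when the matching framework
# (torch / tf / flax) is importable; _LazyModule then defers the actual
# imports until first attribute access.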
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ["""VisionEncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ["""TFVisionEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
__lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 40 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_A = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
_A = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =numpy.dtype(numpy.uintaa ).newbyteorder('>' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=SCREAMING_SNAKE_CASE__ )[0]
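# IDX files store the magic number and dimension sizes as big-endian
# 32-bit unsigned integers, hence the '>' byteorder above.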
@deprecated(SCREAMING_SNAKE_CASE__ , 'Please use tf.data to implement this functionality.' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
print('Extracting' , f.name )
with gzip.GzipFile(fileobj=SCREAMING_SNAKE_CASE__ ) as bytestream:
__UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ )
if magic != 20_51:
raise ValueError(
'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) )
__UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =bytestream.read(rows * cols * num_images )
__UpperCamelCase =numpy.frombuffer(SCREAMING_SNAKE_CASE__ , dtype=numpy.uinta )
__UpperCamelCase =data.reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
return data
@deprecated(SCREAMING_SNAKE_CASE__ , 'Please use tf.one_hot on tensors.' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
__UpperCamelCase =labels_dense.shape[0]
__UpperCamelCase =numpy.arange(SCREAMING_SNAKE_CASE__ ) * num_classes
__UpperCamelCase =numpy.zeros((num_labels, num_classes) )
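# Set exactly one 1 per row, at the column given by the dense class label.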
__UpperCamelCase =1
return labels_one_hot
@deprecated(SCREAMING_SNAKE_CASE__ , 'Please use tf.data to implement this functionality.' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : str=10 ):
print('Extracting' , f.name )
with gzip.GzipFile(fileobj=SCREAMING_SNAKE_CASE__ ) as bytestream:
__UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ )
if magic != 20_49:
raise ValueError(
'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) )
__UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =bytestream.read(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =numpy.frombuffer(SCREAMING_SNAKE_CASE__ , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return labels
class UpperCAmelCase__ :
"""simple docstring"""
@deprecated(
A_ , 'Please use alternatives such as official/mnist/_DataSet.py'
' from tensorflow/models.' , )
def __init__( self , A_ , A_ , A_=False , A_=False , A_=dtypes.floataa , A_=True , A_=None , ) -> Optional[int]:
__UpperCamelCase , __UpperCamelCase =random_seed.get_seed(A_ )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
__UpperCamelCase =dtypes.as_dtype(A_ ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype )
if fake_data:
__UpperCamelCase =10000
__UpperCamelCase =one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'images.shape: {images.shape} labels.shape: {labels.shape}'
__UpperCamelCase =images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
__UpperCamelCase =images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
__UpperCamelCase =images.astype(numpy.floataa )
__UpperCamelCase =numpy.multiply(A_ , 1.0 / 255.0 )
__UpperCamelCase =images
__UpperCamelCase =labels
__UpperCamelCase =0
__UpperCamelCase =0
@property
def _a ( self ) -> Tuple:
return self._images
@property
def _a ( self ) -> Union[str, Any]:
return self._labels
@property
def _a ( self ) -> Optional[Any]:
return self._num_examples
@property
def _a ( self ) -> List[str]:
return self._epochs_completed
def _a ( self , A_ , A_=False , A_=True ) -> Optional[Any]:
if fake_data:
__UpperCamelCase =[1] * 784
__UpperCamelCase =[1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(A_ )],
[fake_label for _ in range(A_ )],
)
__UpperCamelCase =self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
__UpperCamelCase =numpy.arange(self._num_examples )
numpy.random.shuffle(A_ )
__UpperCamelCase =self.images[perma]
__UpperCamelCase =self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the remaining examples in this epoch
__UpperCamelCase =self._num_examples - start
__UpperCamelCase =self._images[start : self._num_examples]
__UpperCamelCase =self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
__UpperCamelCase =numpy.arange(self._num_examples )
numpy.random.shuffle(A_ )
__UpperCamelCase =self.images[perm]
__UpperCamelCase =self.labels[perm]
# Start next epoch
__UpperCamelCase =0
__UpperCamelCase =batch_size - rest_num_examples
__UpperCamelCase =self._index_in_epoch
__UpperCamelCase =self._images[start:end]
__UpperCamelCase =self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
__UpperCamelCase =self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(SCREAMING_SNAKE_CASE__ , 'Please write your own downloading logic.' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
if not gfile.Exists(SCREAMING_SNAKE_CASE__ ):
gfile.MakeDirs(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not gfile.Exists(SCREAMING_SNAKE_CASE__ ):
urllib.request.urlretrieve(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # noqa: S310
with gfile.GFile(SCREAMING_SNAKE_CASE__ ) as f:
__UpperCamelCase =f.size()
print('Successfully downloaded' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'bytes.' )
return filepath
@deprecated(
SCREAMING_SNAKE_CASE__ , 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int=False , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=dtypes.floataa , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : str=50_00 , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : str=DEFAULT_SOURCE_URL , ):
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=SCREAMING_SNAKE_CASE__ , one_hot=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ , seed=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =fake()
__UpperCamelCase =fake()
__UpperCamelCase =fake()
return _Datasets(train=SCREAMING_SNAKE_CASE__ , validation=SCREAMING_SNAKE_CASE__ , test=SCREAMING_SNAKE_CASE__ )
if not source_url: # empty string check
__UpperCamelCase =DEFAULT_SOURCE_URL
__UpperCamelCase ='train-images-idx3-ubyte.gz'
__UpperCamelCase ='train-labels-idx1-ubyte.gz'
__UpperCamelCase ='t10k-images-idx3-ubyte.gz'
__UpperCamelCase ='t10k-labels-idx1-ubyte.gz'
__UpperCamelCase =_maybe_download(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + train_images_file )
with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f:
__UpperCamelCase =_extract_images(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_maybe_download(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + train_labels_file )
with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f:
__UpperCamelCase =_extract_labels(SCREAMING_SNAKE_CASE__ , one_hot=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_maybe_download(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + test_images_file )
with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f:
__UpperCamelCase =_extract_images(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_maybe_download(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + test_labels_file )
with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f:
__UpperCamelCase =_extract_labels(SCREAMING_SNAKE_CASE__ , one_hot=SCREAMING_SNAKE_CASE__ )
if not 0 <= validation_size <= len(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =(
'Validation size should be between 0 and '
F'{len(SCREAMING_SNAKE_CASE__ )}. Received: {validation_size}.'
)
raise ValueError(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =train_images[:validation_size]
__UpperCamelCase =train_labels[:validation_size]
__UpperCamelCase =train_images[validation_size:]
__UpperCamelCase =train_labels[validation_size:]
__UpperCamelCase ={'dtype': dtype, 'reshape': reshape, 'seed': seed}
__UpperCamelCase =_DataSet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_DataSet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_DataSet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
return _Datasets(train=SCREAMING_SNAKE_CASE__ , validation=SCREAMING_SNAKE_CASE__ , test=SCREAMING_SNAKE_CASE__ )
| 62 | 0 |
'''simple docstring'''
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
_A : Tuple ='''bert-base-cased'''
_A : Tuple ='''google/pegasus-xsum'''
_A : Optional[int] =[''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
_A : Tuple =['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
_A : str ='''patrickvonplaten/t5-tiny-random'''
_A : Union[str, Any] ='''sshleifer/bart-tiny-random'''
_A : List[str] ='''sshleifer/tiny-mbart'''
_A : Any ='''sshleifer/tiny-marian-en-de'''
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
lowerCamelCase__ : Tuple = """\n""".join(UpperCamelCase )
Path(UpperCamelCase ).open("""w""" ).writelines(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Any:
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(UpperCamelCase , f'''{split}.source''' ) , UpperCamelCase )
_dump_articles(os.path.join(UpperCamelCase , f'''{split}.target''' ) , UpperCamelCase )
return tmp_dir
class _lowercase ( _lowercase ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Dict ):
lowerCamelCase__ : int = AutoTokenizer.from_pretrained(UpperCamelCase__ )
lowerCamelCase__ : Dict = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowerCamelCase__ : str = max(len(tokenizer.encode(UpperCamelCase__ ) ) for a in ARTICLES )
lowerCamelCase__ : str = max(len(tokenizer.encode(UpperCamelCase__ ) ) for a in SUMMARIES )
lowerCamelCase__ : List[str] = 4
lowerCamelCase__ : str = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = """ro_RO""", """de_DE""" # ignored for all but mbart, but never causes error.
lowerCamelCase__ : Any = SeqaSeqDataset(
UpperCamelCase__ , data_dir=UpperCamelCase__ , type_path="""train""" , max_source_length=UpperCamelCase__ , max_target_length=UpperCamelCase__ , src_lang=UpperCamelCase__ , tgt_lang=UpperCamelCase__ , )
lowerCamelCase__ : Any = DataLoader(UpperCamelCase__ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
lowerCamelCase__ : int = shift_tokens_right(batch["""labels"""] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def lowerCamelCase_ ( self: Any , UpperCamelCase__: Any ):
lowerCamelCase__ : Optional[int] = AutoTokenizer.from_pretrained(UpperCamelCase__ )
lowerCamelCase__ : Dict = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowerCamelCase__ : str = max(len(tokenizer.encode(UpperCamelCase__ ) ) for a in ARTICLES )
lowerCamelCase__ : Tuple = max(len(tokenizer.encode(UpperCamelCase__ ) ) for a in SUMMARIES )
lowerCamelCase__ : str = 4
lowerCamelCase__ : Tuple = LegacySeqaSeqDataset(
UpperCamelCase__ , data_dir=UpperCamelCase__ , type_path="""train""" , max_source_length=20 , max_target_length=UpperCamelCase__ , )
lowerCamelCase__ : Tuple = DataLoader(UpperCamelCase__ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : str = AutoTokenizer.from_pretrained("""facebook/mbart-large-cc25""" )
lowerCamelCase__ : Union[str, Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
lowerCamelCase__ : List[str] = tmp_dir.joinpath("""train.source""" ).open().readlines()
lowerCamelCase__ : int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(UpperCamelCase__ , UpperCamelCase__ , 128 , UpperCamelCase__ )
lowerCamelCase__ : Any = {x.name for x in tmp_dir.iterdir()}
lowerCamelCase__ : Union[str, Any] = {x.name for x in save_dir.iterdir()}
lowerCamelCase__ : Union[str, Any] = save_dir.joinpath("""train.source""" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(UpperCamelCase__ ) < len(UpperCamelCase__ )
assert len(UpperCamelCase__ ) == 1
assert len(packed_examples[0] ) == sum(len(UpperCamelCase__ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="""This test requires fairseq""" )
def lowerCamelCase_ ( self: Dict ):
if not FAIRSEQ_AVAILABLE:
return
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = self._get_dataset(max_len=64 )
lowerCamelCase__ : List[str] = 64
lowerCamelCase__ : Union[str, Any] = ds.make_dynamic_sampler(UpperCamelCase__ , required_batch_size_multiple=UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = [len(UpperCamelCase__ ) for x in batch_sampler]
assert len(set(UpperCamelCase__ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(UpperCamelCase__ ) == len(UpperCamelCase__ ) # no dropped or added examples
lowerCamelCase__ : Optional[int] = DataLoader(UpperCamelCase__ , batch_sampler=UpperCamelCase__ , collate_fn=ds.collate_fn , num_workers=2 )
lowerCamelCase__ : Union[str, Any] = []
lowerCamelCase__ : Optional[int] = []
for batch in data_loader:
lowerCamelCase__ : Optional[Any] = batch["""input_ids"""].shape
lowerCamelCase__ : Tuple = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
lowerCamelCase__ : Union[str, Any] = np.prod(batch["""input_ids"""].shape )
num_src_per_batch.append(UpperCamelCase__ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(UpperCamelCase__ )
assert num_src_per_batch[0] == max(UpperCamelCase__ )
if failures:
raise AssertionError(F'''too many tokens in {len(UpperCamelCase__ )} batches''' )
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self._get_dataset(max_len=512 )
lowerCamelCase__ : Union[str, Any] = 2
lowerCamelCase__ : Optional[int] = ds.make_sortish_sampler(UpperCamelCase__ , shuffle=UpperCamelCase__ )
lowerCamelCase__ : Any = DataLoader(UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=ds.collate_fn , num_workers=2 )
lowerCamelCase__ : Dict = DataLoader(UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=ds.collate_fn , num_workers=2 , sampler=UpperCamelCase__ )
lowerCamelCase__ : List[str] = tokenizer.pad_token_id
def count_pad_tokens(UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int]="input_ids" ):
return [batch[k].eq(UpperCamelCase__ ).sum().item() for batch in data_loader]
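# Sortish sampling batches similarly-sized examples together, so the sorted
# loader should need strictly fewer pad tokens than the unsorted one.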
assert sum(count_pad_tokens(UpperCamelCase__ , k="""labels""" ) ) < sum(count_pad_tokens(UpperCamelCase__ , k="""labels""" ) )
assert sum(count_pad_tokens(UpperCamelCase__ ) ) < sum(count_pad_tokens(UpperCamelCase__ ) )
assert len(UpperCamelCase__ ) == len(UpperCamelCase__ )
def lowerCamelCase_ ( self: int , UpperCamelCase__: Tuple=1_000 , UpperCamelCase__: Dict=128 ):
if os.getenv("""USE_REAL_DATA""" , UpperCamelCase__ ):
lowerCamelCase__ : Tuple = """examples/seq2seq/wmt_en_ro"""
lowerCamelCase__ : List[str] = max_len * 2 * 64
if not Path(UpperCamelCase__ ).joinpath("""train.len""" ).exists():
save_len_file(UpperCamelCase__ , UpperCamelCase__ )
else:
lowerCamelCase__ : Optional[int] = """examples/seq2seq/test_data/wmt_en_ro"""
lowerCamelCase__ : str = max_len * 4
save_len_file(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(UpperCamelCase__ )
lowerCamelCase__ : Any = SeqaSeqDataset(
UpperCamelCase__ , data_dir=UpperCamelCase__ , type_path="""train""" , max_source_length=UpperCamelCase__ , max_target_length=UpperCamelCase__ , n_obs=UpperCamelCase__ , )
return ds, max_tokens, tokenizer
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = self._get_dataset()
lowerCamelCase__ : Union[str, Any] = set(DistributedSortishSampler(UpperCamelCase__ , 256 , num_replicas=2 , rank=0 , add_extra_examples=UpperCamelCase__ ) )
lowerCamelCase__ : Dict = set(DistributedSortishSampler(UpperCamelCase__ , 256 , num_replicas=2 , rank=1 , add_extra_examples=UpperCamelCase__ ) )
assert idsa.intersection(UpperCamelCase__ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: int ):
lowerCamelCase__ : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase__ , use_fast=UpperCamelCase__ )
if tok_name == MBART_TINY:
lowerCamelCase__ : Tuple = SeqaSeqDataset(
UpperCamelCase__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , src_lang="""EN""" , tgt_lang="""FR""" , )
lowerCamelCase__ : Optional[Any] = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
lowerCamelCase__ : str = SeqaSeqDataset(
UpperCamelCase__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , )
lowerCamelCase__ : Optional[int] = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(UpperCamelCase__ ) == 1 if tok_name == BART_TINY else len(UpperCamelCase__ ) == 0
| 41 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = TransfoXLTokenizer
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Tuple = False
def _a ( self ) -> Union[str, Any]:
super().setUp()
__UpperCamelCase =[
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def _a ( self , **A_ ) -> Optional[int]:
__UpperCamelCase =True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **A_ )
def _a ( self , A_ ) -> Tuple:
__UpperCamelCase ='<unk> UNwanted , running'
__UpperCamelCase ='<unk> unwanted, running'
return input_text, output_text
def _a ( self ) -> str:
__UpperCamelCase =TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=A_ )
__UpperCamelCase =tokenizer.tokenize('<unk> UNwanted , running' )
self.assertListEqual(A_ , ['<unk>', 'unwanted', ',', 'running'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [0, 4, 8, 7] )
def _a ( self ) -> Any:
__UpperCamelCase =TransfoXLTokenizer(lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =TransfoXLTokenizer(lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> int:
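# Transfo-XL follows WikiText-style preprocessing: intra-word hyphens and
# intra-number separators are wrapped in '@' markers ('@-@', '@,@', '@.@').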
__UpperCamelCase =TransfoXLTokenizer(lower_case=A_ )
__UpperCamelCase ='Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
__UpperCamelCase =[
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
self.assertListEqual(tokenizer.tokenize(A_ ) , A_ )
self.assertEqual(tokenizer.convert_tokens_to_string(A_ ) , A_ )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =len(A_ )
tokenizer.add_tokens(['new1', 'new2'] )
tokenizer.move_added_token('new1' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(A_ ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('new1' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , 'new1' )
| 62 | 0 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
lowercase : Optional[Any] = logging.get_logger(__name__)
class __UpperCAmelCase ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_=None , **lowerCAmelCase_ ):
"""simple docstring"""
warnings.warn(
'`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '
'instead.' , lowerCAmelCase_ , )
super().__init__(args=lowerCAmelCase_ , **lowerCAmelCase_ )
| 42 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_A = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 0 |
from __future__ import annotations
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not nums:
return 0
__UpperCamelCase :str = nums[0]
__UpperCamelCase :Optional[int] = 0
for num in nums[1:]:
__UpperCamelCase , __UpperCamelCase :List[str] = (
max_excluding + num,
max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),
)
return max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
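# Example: [2, 7, 9, 3, 1] -> 12, taking 2 + 9 + 1 and never two neighbours.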
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
_A = logging.getLogger(__name__)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
__UpperCamelCase =np.argmax(SCREAMING_SNAKE_CASE__ , axis=1 )
return np.sum(outputs == labels )
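# Returns the raw count of correct predictions; the evaluation loop divides
# by the number of examples to turn it into an accuracy ratio.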
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ):
with open(SCREAMING_SNAKE_CASE__ , encoding='utf_8' ) as f:
__UpperCamelCase =csv.reader(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[]
next(SCREAMING_SNAKE_CASE__ ) # skip the first line
for line in tqdm(SCREAMING_SNAKE_CASE__ ):
output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict ):
__UpperCamelCase =[]
for dataset in encoded_datasets:
__UpperCamelCase =len(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
__UpperCamelCase =np.zeros((n_batch, 2) , dtype=np.intaa )
__UpperCamelCase =np.full((n_batch, 2, input_len) , fill_value=-1_00 , dtype=np.intaa )
__UpperCamelCase =np.zeros((n_batch,) , dtype=np.intaa )
for i, (story, conta, conta, mc_label) in enumerate(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =[start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__UpperCamelCase =[start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__UpperCamelCase =with_conta
__UpperCamelCase =with_conta
__UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) - 1
__UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) - 1
__UpperCamelCase =with_conta
__UpperCamelCase =with_conta
__UpperCamelCase =mc_label
__UpperCamelCase =(input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(SCREAMING_SNAKE_CASE__ ) for t in all_inputs ) )
return tensor_datasets
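# Per-dataset tensor shapes: input_ids (n, 2, len), mc_token_ids (n, 2),
# lm_labels (n, 2, len) filled with -100 (the ignore index), mc_labels (n,).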
def _UpperCAmelCase ( ):
__UpperCamelCase =argparse.ArgumentParser()
parser.add_argument('--model_name' , type=SCREAMING_SNAKE_CASE__ , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=SCREAMING_SNAKE_CASE__ , default='' )
parser.add_argument('--eval_dataset' , type=SCREAMING_SNAKE_CASE__ , default='' )
parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE__ , default=42 )
parser.add_argument('--num_train_epochs' , type=SCREAMING_SNAKE_CASE__ , default=3 )
parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=8 )
parser.add_argument('--eval_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=16 )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=SCREAMING_SNAKE_CASE__ , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=SCREAMING_SNAKE_CASE__ , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=SCREAMING_SNAKE_CASE__ , help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=SCREAMING_SNAKE_CASE__ , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE__ , default=6.25E-5 )
parser.add_argument('--warmup_steps' , default=0 , type=SCREAMING_SNAKE_CASE__ , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=SCREAMING_SNAKE_CASE__ , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=SCREAMING_SNAKE_CASE__ , default=0.01 )
parser.add_argument('--lm_coef' , type=SCREAMING_SNAKE_CASE__ , default=0.9 )
parser.add_argument('--n_valid' , type=SCREAMING_SNAKE_CASE__ , default=3_74 )
parser.add_argument('--server_ip' , type=SCREAMING_SNAKE_CASE__ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=SCREAMING_SNAKE_CASE__ , default='' , help='Can be used for distant debugging.' )
__UpperCamelCase =parser.parse_args()
print(SCREAMING_SNAKE_CASE__ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE__ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
__UpperCamelCase =torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
__UpperCamelCase =torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# These loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__UpperCamelCase =['_start_', '_delimiter_', '_classify_']
__UpperCamelCase =OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE__ ) )
model.to(SCREAMING_SNAKE_CASE__ )
# Load and encode the datasets
def tokenize_and_encode(SCREAMING_SNAKE_CASE__ : str ):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return obj
return [tokenize_and_encode(SCREAMING_SNAKE_CASE__ ) for o in obj]
logger.info('Encoding dataset...' )
__UpperCamelCase =load_rocstories_dataset(args.train_dataset )
__UpperCamelCase =load_rocstories_dataset(args.eval_dataset )
__UpperCamelCase =(train_dataset, eval_dataset)
__UpperCamelCase =tokenize_and_encode(SCREAMING_SNAKE_CASE__ )
# Compute the max input length for the Transformer
__UpperCamelCase =model.config.n_positions // 2 - 2
__UpperCamelCase =max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
__UpperCamelCase =min(SCREAMING_SNAKE_CASE__ , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__UpperCamelCase =pre_process_datasets(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ )
__UpperCamelCase , __UpperCamelCase =tensor_datasets[0], tensor_datasets[1]
__UpperCamelCase =TensorDataset(*SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =RandomSampler(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =DataLoader(SCREAMING_SNAKE_CASE__ , sampler=SCREAMING_SNAKE_CASE__ , batch_size=args.train_batch_size )
__UpperCamelCase =TensorDataset(*SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =SequentialSampler(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =DataLoader(SCREAMING_SNAKE_CASE__ , sampler=SCREAMING_SNAKE_CASE__ , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__UpperCamelCase =args.max_steps
__UpperCamelCase =args.max_steps // (len(SCREAMING_SNAKE_CASE__ ) // args.gradient_accumulation_steps) + 1
else:
__UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) // args.gradient_accumulation_steps * args.num_train_epochs
__UpperCamelCase =list(model.named_parameters() )
__UpperCamelCase =['bias', 'LayerNorm.bias', 'LayerNorm.weight']
__UpperCamelCase =[
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
__UpperCamelCase =AdamW(SCREAMING_SNAKE_CASE__ , lr=args.learning_rate , eps=args.adam_epsilon )
__UpperCamelCase =get_linear_schedule_with_warmup(
SCREAMING_SNAKE_CASE__ , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE__ )
if args.do_train:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
__UpperCamelCase =0
__UpperCamelCase =0
__UpperCamelCase =tqdm(SCREAMING_SNAKE_CASE__ , desc='Training' )
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =tuple(t.to(SCREAMING_SNAKE_CASE__ ) for t in batch )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =batch
__UpperCamelCase =model(SCREAMING_SNAKE_CASE__ , mc_token_ids=SCREAMING_SNAKE_CASE__ , lm_labels=SCREAMING_SNAKE_CASE__ , mc_labels=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__UpperCamelCase =(
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__UpperCamelCase ='Training loss: {:.2e} lr: {:.2e}'.format(SCREAMING_SNAKE_CASE__ , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__UpperCamelCase =model.module if hasattr(SCREAMING_SNAKE_CASE__ , 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__UpperCamelCase =os.path.join(args.output_dir , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =os.path.join(args.output_dir , SCREAMING_SNAKE_CASE__ )
torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE__ )
model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE__ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
__UpperCamelCase =OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
__UpperCamelCase =OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(SCREAMING_SNAKE_CASE__ )
if args.do_eval:
model.eval()
__UpperCamelCase , __UpperCamelCase =0, 0
__UpperCamelCase , __UpperCamelCase =0, 0
for batch in tqdm(SCREAMING_SNAKE_CASE__ , desc='Evaluating' ):
__UpperCamelCase =tuple(t.to(SCREAMING_SNAKE_CASE__ ) for t in batch )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =batch
with torch.no_grad():
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =model(
SCREAMING_SNAKE_CASE__ , mc_token_ids=SCREAMING_SNAKE_CASE__ , lm_labels=SCREAMING_SNAKE_CASE__ , mc_labels=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =mc_logits.detach().cpu().numpy()
__UpperCamelCase =mc_labels.to('cpu' ).numpy()
__UpperCamelCase =accuracy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
__UpperCamelCase =eval_loss / nb_eval_steps
__UpperCamelCase =eval_accuracy / nb_eval_examples
__UpperCamelCase =tr_loss / nb_tr_steps if args.do_train else None
__UpperCamelCase ={'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
__UpperCamelCase =os.path.join(args.output_dir , 'eval_results.txt' )
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , SCREAMING_SNAKE_CASE__ , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 62 | 0 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : Optional[Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch """
"""helper utility that will spawn up """
"""multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" ,type=_lowerCamelCase ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" ,type=_lowerCamelCase ,help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) ,)
# rest from the training program
parser.add_argument("""training_script_args""" ,nargs=_lowerCamelCase )
return parser.parse_args()
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
_lowerCAmelCase : List[Any] = parse_args()
# Import training_script as a module.
_lowerCAmelCase : Optional[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_lowerCAmelCase : Union[str, Any] = script_fpath.stem
_lowerCAmelCase : Optional[Any] = importlib.import_module(_lowerCamelCase )
# Patch sys.argv
_lowerCAmelCase : Tuple = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main()
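# Example invocation (hypothetical file names, assuming an 8-core TPU):
# python this_launcher.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased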
| 44 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int = 10**12 ):
__UpperCamelCase =1
__UpperCamelCase =0
__UpperCamelCase =1
__UpperCamelCase =1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(f"""{solution() = }""")
| 62 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=2 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ):
__a = parent
__a = 13
__a = 7
__a = True
__a = True
__a = True
__a = True
__a = 99
__a = 384
__a = 2
__a = 4
__a = 37
__a = '''gelu'''
__a = 0.1
__a = 0.1
__a = 512
__a = 16
__a = 2
__a = 0.02
__a = 3
__a = 4
__a = 128
__a = 2
__a = 9
__a = 1
__a = None
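# Note: the constructor arguments above are deliberately shadowed by fixed,
# small values so every test runs the same tiny configuration.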
def __UpperCAmelCase ( self ):
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = ids_tensor([self.batch_size] , self.num_choices )
__a = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_a , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
__a = TFConvBertModel(config=_a )
__a = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__a = [input_ids, input_mask]
__a = model(_a )
__a = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
__a = TFConvBertForMaskedLM(config=_a )
__a = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__a = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
__a = self.num_labels
__a = TFConvBertForSequenceClassification(config=_a )
__a = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__a = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
__a = self.num_choices
__a = TFConvBertForMultipleChoice(config=_a )
__a = tf.tile(tf.expand_dims(_a , 1 ) , (1, self.num_choices, 1) )
__a = tf.tile(tf.expand_dims(_a , 1 ) , (1, self.num_choices, 1) )
__a = tf.tile(tf.expand_dims(_a , 1 ) , (1, self.num_choices, 1) )
__a = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__a = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
__a = self.num_labels
__a = TFConvBertForTokenClassification(config=_a )
__a = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__a = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
__a = TFConvBertForQuestionAnswering(config=_a )
__a = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__a = model(_a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self ):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__UpperCAmelCase : str = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCAmelCase : str = False
__UpperCAmelCase : str = False
__UpperCAmelCase : Optional[Any] = False
def __UpperCAmelCase ( self ):
__a = TFConvBertModelTester(self )
__a = ConfigTester(self , config_class=_a , hidden_size=37 )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
@slow
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = True
__a = True
if hasattr(_a , '''use_cache''' ):
__a = True
__a = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
__a = getattr(self.model_tester , '''key_length''' , _a )
for model_class in self.all_model_classes:
__a = self._prepare_for_class(_a , _a )
__a = model_class(_a )
__a = len(model(_a ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_a , saved_model=_a )
__a = os.path.join(_a , '''saved_model''' , '''1''' )
__a = tf.keras.models.load_model(_a )
__a = model(_a )
if self.is_encoder_decoder:
__a = outputs['''encoder_hidden_states''']
__a = outputs['''encoder_attentions''']
else:
__a = outputs['''hidden_states''']
__a = outputs['''attentions''']
self.assertEqual(len(_a ) , _a )
__a = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_a ) , _a )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __UpperCAmelCase ( self ):
__a = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
self.assertIsNotNone(_a )
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = True
__a = getattr(self.model_tester , '''decoder_seq_length''' , self.model_tester.seq_length )
__a = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
__a = getattr(self.model_tester , '''key_length''' , _a )
__a = getattr(self.model_tester , '''key_length''' , _a )
def check_decoder_attentions_output(_a ):
__a = len(_a )
self.assertEqual(out_len % 2 , 0 )
__a = outputs.decoder_attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_a ):
__a = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__a = True
__a = False
__a = model_class(_a )
__a = model(self._prepare_for_class(_a , _a ) )
__a = len(_a )
self.assertEqual(config.output_hidden_states , _a )
check_encoder_attentions_output(_a )
if self.is_encoder_decoder:
__a = model_class(_a )
__a = model(self._prepare_for_class(_a , _a ) )
self.assertEqual(config.output_hidden_states , _a )
check_decoder_attentions_output(_a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__a = True
__a = model_class(_a )
__a = model(self._prepare_for_class(_a , _a ) )
self.assertEqual(config.output_hidden_states , _a )
check_encoder_attentions_output(_a )
# Check attention is always last and order is fine
__a = True
__a = True
__a = model_class(_a )
__a = model(self._prepare_for_class(_a , _a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_a ) )
self.assertEqual(model.config.output_hidden_states , _a )
check_encoder_attentions_output(_a )
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self ):
__a = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
__a = tf.constant([[0, 1, 2, 3, 4, 5]] )
__a = model(_a )[0]
__a = [1, 6, 768]
self.assertEqual(output.shape , _a )
__a = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1E-4 )
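# Note on the attention-shape assertions above: ConvBERT splits its heads between
# self-attention and a span-based dynamic convolution branch, which is why these
# tests expect num_attention_heads / 2 attention maps per layer (a hedged reading
# based on the default head_ratio=2 in ConvBertConfig).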
| 45 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_vit_mae'] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_vit_mae'] = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
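# A minimal sketch of the deferred-import idea behind `_LazyModule`, written with
# the PEP 562 module-level `__getattr__` hook. The symbol-to-submodule mapping
# below is an assumption for illustration; this is not the actual transformers
# implementation.
import importlib
_SUBMODULES = {'ViTMAEModel': 'modeling_vit_mae'}  # symbol -> submodule (assumed)
def __getattr__(name):
# Import the heavy submodule only when one of its symbols is first accessed.
if name in _SUBMODULES:
module = importlib.import_module('.' + _SUBMODULES[name], __name__)
return getattr(module, name)
raise AttributeError(f'module {__name__!r} has no attribute {name!r}')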
| 62 | 0 |
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
for param in SCREAMING_SNAKE_CASE.parameters():
param.requires_grad = False
def UpperCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase = """cuda""" if torch.cuda.is_available() else """cpu"""
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowerCAmelCase = """mps"""
if device == "mps":
print(
"""WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"""
""" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"""
""" with generations.""" )
return device
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
fig = plt.imshow(SCREAMING_SNAKE_CASE )
fig.axes.get_xaxis().set_visible(False )
fig.axes.get_yaxis().set_visible(False )
plt.show()
def UpperCAmelCase__ ( ):
'''simple docstring'''
current_time = datetime.now()
timestamp = current_time.strftime("""%H:%M:%S""" )
return timestamp
| 46 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_jukebox'] = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"camembert-base": 5_1_2,
}
SPIECE_UNDERLINE = "▁"
class A__ ( PreTrainedTokenizer ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['input_ids', 'attention_mask']
def __init__( self : Optional[Any] , vocab_file : str , bos_token : Optional[int]="<s>" , eos_token : Any="</s>" , sep_token : Tuple="</s>" , cls_token : Tuple="<s>" , unk_token : str="<unk>" , pad_token : Optional[Any]="<pad>" , mask_token : Optional[Any]="<mask>" , additional_special_tokens : Union[str, Any]=["<s>NOTUSED", "</s>NOTUSED"] , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs : List[str] , ) -> None:
'''simple docstring'''
# Mask token behaves like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(vocab_file ) )
self.vocab_file = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>)
self.fairseq_tokens_to_ids = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3}
self.fairseq_offset = len(self.fairseq_tokens_to_ids )
self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def build_inputs_with_special_tokens( self : Tuple , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def get_special_tokens_mask( self : Optional[Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0 )) + [1]
return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
def create_token_type_ids_from_sequences( self : Dict , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
@property
def vocab_size( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def get_vocab( self : Tuple ) -> List[Any]:
'''simple docstring'''
vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _tokenize( self : Optional[int] , _a : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(_a , out_type=str )
def _convert_token_to_id( self : Optional[int] , _a : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if _a in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[_a]
elif self.sp_model.PieceToId(_a ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(_a )
def _convert_id_to_token( self : Tuple , _a : Any ) -> str:
'''simple docstring'''
if _a in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[_a]
return self.sp_model.IdToPiece(_a - self.fairseq_offset )
def convert_tokens_to_string( self : List[Any] , tokens : List[Any] ) -> str:
'''simple docstring'''
current_sub_tokens = []
out_string = ''
prev_is_special = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(current_sub_tokens ) + token
prev_is_special = True
current_sub_tokens = []
else:
current_sub_tokens.append(token )
prev_is_special = False
out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
def __getstate__( self : str ) -> Optional[int]:
'''simple docstring'''
state = self.__dict__.copy()
state['sp_model'] = None
return state
def __setstate__( self : Union[str, Any] , _a : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
self.__dict__ = _a
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def save_vocabulary( self : Any , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(save_directory ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
elif not os.path.isfile(self.vocab_file ):
with open(out_vocab_file , 'wb' ) as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model )
return (out_vocab_file,)
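# A hedged usage sketch for the tokenizer class above (named A__ here). It
# assumes the public "camembert-base" checkpoint is reachable, which this
# snippet does not guarantee, so it is kept as comments rather than executed code:
# tokenizer = A__.from_pretrained("camembert-base")
# encoding = tokenizer("J'aime le camembert !")
# # <s> ... </s> are added by build_inputs_with_special_tokens:
# assert encoding["input_ids"][0] == tokenizer.cls_token_id
# assert encoding["input_ids"][-1] == tokenizer.sep_token_id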
| 47 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 0 |
def A ( _SCREAMING_SNAKE_CASE ) -> int:
if not isinstance(_SCREAMING_SNAKE_CASE ,int ):
raise TypeError("Input value must be a 'int' type" )
if _SCREAMING_SNAKE_CASE < 0:
raise ValueError("Input value must be a positive integer" )
return bin(_SCREAMING_SNAKE_CASE ).count("1" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType( Protocol ):
"""simple docstring"""
def process( self , sample : float ) -> float:
return 0.0
def get_bounds( fft_results : np.ndarray , samplerate : int ):
lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def show_frequency_response( filter_type : FilterType , samplerate : int ):
size = 5_12
inputs = [1] + [0] * (size - 1)
outputs = [filter_type.process(item ) for item in inputs]
filler = [0] * (samplerate - size) # zero-padding
outputs += filler
fft_out = np.abs(np.fft.fft(outputs ) )
fft_db = 20 * np.log10(fft_out )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('Frequency (Hz)' )
plt.xscale('log' )
# Display within reasonable bounds
bounds = get_bounds(fft_db , samplerate )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel('Gain (dB)' )
plt.plot(fft_db )
plt.show()
def show_phase_response( filter_type : FilterType , samplerate : int ):
size = 5_12
inputs = [1] + [0] * (size - 1)
outputs = [filter_type.process(item ) for item in inputs]
filler = [0] * (samplerate - size) # zero-padding
outputs += filler
phases = np.angle(np.fft.fft(outputs ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('Frequency (Hz)' )
plt.xscale('log' )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel('Phase shift (Radians)' )
plt.plot(np.unwrap(phases , -2 * pi ) )
plt.show()
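# A minimal concrete filter satisfying the process() interface above — a hedged
# sketch, not part of the original module. The one-pole low-pass coefficient and
# the 48 kHz samplerate are arbitrary choices for illustration.
class SimpleLowPass:
def __init__(self, alpha: float = 0.1) -> None:
self.alpha = alpha
self.prev = 0.0
def process(self, sample: float) -> float:
# y[n] = alpha * x[n] + (1 - alpha) * y[n - 1]
self.prev = self.alpha * sample + (1 - self.alpha) * self.prev
return self.prev
# show_frequency_response(SimpleLowPass(), 48000)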
| 62 | 0 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__snake_case :Dict = logging.getLogger(__name__)
def parse_args():
parser = argparse.ArgumentParser(
description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
parser.add_argument(
'''--dataset_name''' , type=str , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , )
parser.add_argument(
'''--dataset_config''' , type=str , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' )
parser.add_argument(
'''--tokenizer_name_or_path''' , type=str , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , )
parser.add_argument(
'''--shard_size''' , type=int , default=1000 , help='''Number of entries to go in a single shard.''' , )
parser.add_argument('''--split''' , type=str , default='''train''' , choices=['''train''', '''test''', '''validation'''] )
parser.add_argument(
'''--limit''' , default=None , type=int , help='''Limit the number of shards (used for debugging).''' , )
parser.add_argument(
'''--max_length''' , type=int , default=512 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
''' sequence length that is a multiple of 8.''' , )
parser.add_argument(
'''--output_dir''' , default='''tf-tpu''' , type=str , help='''Output directory where the TFRecord shards will be saved. If the'''
''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
''' shards will be directly saved to a Google Cloud Storage bucket.''' , )
args = parser.parse_args()
return args
def tokenize_function( tokenizer ):
def fn( examples ):
return tokenizer(examples['''text'''] )
return fn
def get_serialized_examples( tokenized_data ):
records = []
for i in range(len(tokenized_data['''input_ids'''] ) ):
features = {
'''input_ids''': tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data['''input_ids'''][i] ) ),
'''attention_mask''': tf.train.Feature(
int64_list=tf.train.Int64List(value=tokenized_data['''attention_mask'''][i] ) ),
}
example = tf.train.Example(features=tf.train.Features(feature=features ) )
record_bytes = example.SerializeToString()
records.append(record_bytes )
return records
def main( args ):
dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
max_samples = min(len(dataset ) , args.limit )
dataset = dataset.select(range(max_samples ) )
print(f'Limiting the dataset to {args.limit} entries.' )
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
split_dir = os.path.join(args.output_dir , args.split )
if not os.path.exists(split_dir ):
os.makedirs(split_dir )
else:
split_dir = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
tokenize_fn = tokenize_function(tokenizer )
dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=['''text'''] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(examples ):
# Concatenate all texts.
concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
total_length = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
result = {
k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=1000 , num_proc=4 )
shard_count = 0
total_records = 0
for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
records_containing = len(dataset_snapshot['''input_ids'''] )
filename = os.path.join(split_dir , f'dataset-{shard_count}-{records_containing}.tfrecord' )
serialized_examples = get_serialized_examples(dataset_snapshot )
with tf.io.TFRecordWriter(filename ) as out_file:
for i in range(len(serialized_examples ) ):
example = serialized_examples[i]
out_file.write(example )
print('''Wrote file {} containing {} records'''.format(filename , records_containing ) )
shard_count += 1
total_records += records_containing
with open(f'split-{args.split}-records-count.txt' , '''w''' ) as f:
print(f'Total {args.split} records: {total_records}' , file=f )
if __name__ == "__main__":
args = parse_args()
main(args)
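# A toy trace of the chunking performed by `group_texts` above — a hedged,
# self-contained sketch (the helper name `_demo_group_texts` is made up here):
def _demo_group_texts(examples , max_length=4 ):
concatenated = {k: sum(examples[k] , [] ) for k in examples}
total = (len(concatenated['''input_ids'''] ) // max_length) * max_length
return {k: [t[i : i + max_length] for i in range(0 , total , max_length )] for k, t in concatenated.items()}
# _demo_group_texts({"input_ids": [[1, 2, 3], [4, 5, 6, 7, 8]]})
# -> {"input_ids": [[1, 2, 3, 4], [5, 6, 7, 8]]}   (8 tokens total, so no remainder is dropped)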
| 49 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_sew'] = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 0 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
if attention_mask is None:
attention_mask = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
head_mask = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=torch_device )
if decoder_head_mask is None:
decoder_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
if cross_attn_head_mask is None:
cross_attn_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class lowerCAmelCase :
def __init__( self : Dict , UpperCAmelCase : Dict , UpperCAmelCase : int=13 , UpperCAmelCase : Tuple=7 , UpperCAmelCase : int=True , UpperCAmelCase : List[Any]=False , UpperCAmelCase : List[str]=99 , UpperCAmelCase : Optional[int]=16 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : str=4 , UpperCAmelCase : Optional[int]=4 , UpperCAmelCase : List[Any]="relu" , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : List[Any]=0.0 , UpperCAmelCase : List[str]=0.0 , UpperCAmelCase : str=20 , UpperCAmelCase : List[Any]=2 , UpperCAmelCase : str=1 , UpperCAmelCase : List[str]=0 , ) -> Dict:
lowerCamelCase__ : Optional[int] = parent
lowerCamelCase__ : Tuple = batch_size
lowerCamelCase__ : int = seq_length
lowerCamelCase__ : Union[str, Any] = is_training
lowerCamelCase__ : Dict = use_labels
lowerCamelCase__ : Optional[int] = vocab_size
lowerCamelCase__ : Optional[Any] = hidden_size
lowerCamelCase__ : str = num_hidden_layers
lowerCamelCase__ : Dict = num_attention_heads
lowerCamelCase__ : str = intermediate_size
lowerCamelCase__ : str = hidden_act
lowerCamelCase__ : Dict = hidden_dropout_prob
lowerCamelCase__ : Any = attention_probs_dropout_prob
lowerCamelCase__ : Dict = encoder_layerdrop
lowerCamelCase__ : List[Any] = decoder_layerdrop
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : str = eos_token_id
lowerCamelCase__ : int = pad_token_id
lowerCamelCase__ : Union[str, Any] = bos_token_id
def A_ ( self : Any ) -> int:
lowerCamelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : str = self.eos_token_id # Eos Token
lowerCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowerCamelCase__ : Any = input_ids.clamp(self.pad_token_id + 1 )
lowerCamelCase__ : Dict = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowerCamelCase__ : Tuple = self.get_config()
lowerCamelCase__ : str = prepare_mam_aaa_inputs_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
return config, inputs_dict
def A_ ( self : Optional[int] ) -> Union[str, Any]:
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def A_ ( self : int ) -> Optional[Any]:
lowerCamelCase__ , lowerCamelCase__ : Dict = self.prepare_config_and_inputs()
return config, inputs_dict
def A_ ( self : Tuple , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] ) -> Any:
lowerCamelCase__ : Any = MaMaaaModel(config=UpperCAmelCase ).get_decoder().to(UpperCAmelCase ).eval()
lowerCamelCase__ : Tuple = inputs_dict['input_ids']
lowerCamelCase__ : Tuple = inputs_dict['attention_mask']
lowerCamelCase__ : str = inputs_dict['head_mask']
# first forward pass
lowerCamelCase__ : Dict = model(UpperCAmelCase , attention_mask=UpperCAmelCase , head_mask=UpperCAmelCase , use_cache=UpperCAmelCase )
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase__ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase__ : Any = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
lowerCamelCase__ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase__ : Optional[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
lowerCamelCase__ : Tuple = model(UpperCAmelCase , attention_mask=UpperCAmelCase )['last_hidden_state']
lowerCamelCase__ : str = model(UpperCAmelCase , attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase )[
'last_hidden_state'
]
# select random slice
lowerCamelCase__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase__ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-2 ) )
def A_ ( self : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : str ) -> List[str]:
lowerCamelCase__ : Optional[int] = MaMaaaModel(config=UpperCAmelCase ).to(UpperCAmelCase ).eval()
lowerCamelCase__ : Optional[int] = model(**UpperCAmelCase )
lowerCamelCase__ : Dict = outputs.encoder_last_hidden_state
lowerCamelCase__ : int = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : Optional[int] = model.get_encoder()
encoder.save_pretrained(UpperCAmelCase )
lowerCamelCase__ : List[Any] = MaMaaaEncoder.from_pretrained(UpperCAmelCase ).to(UpperCAmelCase )
lowerCamelCase__ : int = encoder(inputs_dict['input_ids'] , attention_mask=inputs_dict['attention_mask'] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : List[Any] = model.get_decoder()
decoder.save_pretrained(UpperCAmelCase )
lowerCamelCase__ : Any = MaMaaaDecoder.from_pretrained(UpperCAmelCase ).to(UpperCAmelCase )
lowerCamelCase__ : Any = decoder(
input_ids=inputs_dict['decoder_input_ids'] , attention_mask=inputs_dict['decoder_attention_mask'] , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=inputs_dict['attention_mask'] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class lowerCAmelCase ( __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, unittest.TestCase ):
UpperCAmelCase__ = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
UpperCAmelCase__ = (
{
"""conversational""": MaMaaaForConditionalGeneration,
"""feature-extraction""": MaMaaaModel,
"""summarization""": MaMaaaForConditionalGeneration,
"""text2text-generation""": MaMaaaForConditionalGeneration,
"""translation""": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def A_ ( self : List[str] , UpperCAmelCase : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : List[str] ) -> Any:
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def A_ ( self : Any ) -> int:
lowerCamelCase__ : Union[str, Any] = MaMaaaModelTester(self )
lowerCamelCase__ : Dict = ConfigTester(self , config_class=UpperCAmelCase )
def A_ ( self : int ) -> List[Any]:
self.config_tester.run_common_tests()
def A_ ( self : Optional[int] ) -> Union[str, Any]:
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
lowerCamelCase__ : Any = model_class(UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase )
lowerCamelCase__ , lowerCamelCase__ : List[Any] = model_class.from_pretrained(UpperCAmelCase , output_loading_info=UpperCAmelCase )
self.assertEqual(info['missing_keys'] , [] )
def A_ ( self : Optional[int] ) -> int:
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*UpperCAmelCase )
def A_ ( self : Tuple ) -> List[str]:
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCAmelCase )
def A_ ( self : Optional[int] ) -> List[Any]:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
lowerCamelCase__ : Union[str, Any] = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : List[str] = copy.deepcopy(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
if not self.is_encoder_decoder:
lowerCamelCase__ : Tuple = inputs['input_ids']
del inputs["input_ids"]
else:
lowerCamelCase__ : List[str] = inputs['input_ids']
lowerCamelCase__ : List[Any] = inputs.get('decoder_input_ids' , UpperCAmelCase )
del inputs["input_ids"]
inputs.pop('decoder_input_ids' , UpperCAmelCase )
lowerCamelCase__ : List[str] = model.get_input_embeddings()
if not self.is_encoder_decoder:
lowerCamelCase__ : List[Any] = wte(UpperCAmelCase )
else:
lowerCamelCase__ : int = wte(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = wte(UpperCAmelCase )
with torch.no_grad():
model(**UpperCAmelCase )[0]
def A_ ( self : Dict ) -> List[str]:
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
lowerCamelCase__ : Dict = input_dict['input_ids']
lowerCamelCase__ : List[str] = input_ids.ne(1 ).to(UpperCAmelCase )
lowerCamelCase__ : List[str] = MaMaaaForConditionalGeneration(UpperCAmelCase ).eval().to(UpperCAmelCase )
if torch_device == "cuda":
model.half()
model.generate(UpperCAmelCase , attention_mask=UpperCAmelCase )
model.generate(num_beams=4 , do_sample=UpperCAmelCase , early_stopping=UpperCAmelCase , num_return_sequences=3 )
def _long_tensor( tok_lst ) -> Optional[Any]:
return torch.tensor(tok_lst , dtype=torch.long , device=torch_device )
_UpperCAmelCase : Union[str, Any] = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class lowerCAmelCase ( unittest.TestCase ):
@cached_property
def A_ ( self : Dict ) -> Optional[int]:
return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' )
def A_ ( self : Any ) -> Optional[Any]:
lowerCamelCase__ : Optional[int] = MaMaaaModel.from_pretrained('facebook/m2m100_418M' ).to(UpperCAmelCase )
lowerCamelCase__ : str = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] )
lowerCamelCase__ : Any = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] )
lowerCamelCase__ : Optional[int] = prepare_mam_aaa_inputs_dict(model.config , UpperCAmelCase , UpperCAmelCase )
with torch.no_grad():
lowerCamelCase__ : Tuple = model(**UpperCAmelCase )[0]
lowerCamelCase__ : Tuple = torch.Size((1, 11, 1024) )
self.assertEqual(output.shape , UpperCAmelCase )
# change to expected output here
lowerCamelCase__ : int = torch.tensor(
[[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=UpperCAmelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
def A_ ( self : List[str] ) -> Dict:
lowerCamelCase__ : List[Any] = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(UpperCAmelCase )
# change to intended input
lowerCamelCase__ : Union[str, Any] = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] )
lowerCamelCase__ : Tuple = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] )
lowerCamelCase__ : Optional[int] = prepare_mam_aaa_inputs_dict(model.config , UpperCAmelCase , UpperCAmelCase )
with torch.no_grad():
lowerCamelCase__ : str = model(**UpperCAmelCase )[0]
lowerCamelCase__ : Dict = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , UpperCAmelCase )
# change to expected output here
lowerCamelCase__ : Optional[int] = torch.tensor(
[[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=UpperCAmelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
def A_ ( self : int ) -> str:
lowerCamelCase__ : List[str] = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(UpperCAmelCase )
lowerCamelCase__ : str = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' , src_lang='fr' , tgt_lang='en' )
lowerCamelCase__ : Dict = [
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'
' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'
' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.',
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
lowerCamelCase__ : List[str] = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors='pt' )
lowerCamelCase__ : Tuple = model.generate(
input_ids=dct['input_ids'].to(UpperCAmelCase ) , attention_mask=dct['attention_mask'].to(UpperCAmelCase ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('en' ) , )
lowerCamelCase__ : str = [
'The NSA case highlights the total absence of intelligence debate',
'I think there are two levels of response from the French government.',
'When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'
' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'
' communications in France.',
]
lowerCamelCase__ : Tuple = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
assert generated == expected_en
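# A hedged usage sketch mirroring the fr->en integration test above. It assumes
# network access to the facebook/m2m100_418M checkpoint, so it is kept as
# comments rather than executable code:
# tokenizer = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' , src_lang='fr' , tgt_lang='en')
# model = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M')
# batch = tokenizer(['La vie est belle.'] , return_tensors='pt')
# generated = model.generate(**batch , forced_bos_token_id=tokenizer.get_lang_id('en'))
# print(tokenizer.batch_decode(generated , skip_special_tokens=True))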
| 50 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = "mvp"
UpperCAmelCase__ : Tuple = ["past_key_values"]
UpperCAmelCase__ : Union[str, Any] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , A_=50267 , A_=1024 , A_=12 , A_=4096 , A_=16 , A_=12 , A_=4096 , A_=16 , A_=0.0 , A_=0.0 , A_="gelu" , A_=1024 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.02 , A_=0.0 , A_=False , A_=True , A_=1 , A_=0 , A_=2 , A_=True , A_=2 , A_=2 , A_=False , A_=100 , A_=800 , **A_ , ) -> Union[str, Any]:
__UpperCamelCase =vocab_size
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =d_model
__UpperCamelCase =encoder_ffn_dim
__UpperCamelCase =encoder_layers
__UpperCamelCase =encoder_attention_heads
__UpperCamelCase =decoder_ffn_dim
__UpperCamelCase =decoder_layers
__UpperCamelCase =decoder_attention_heads
__UpperCamelCase =dropout
__UpperCamelCase =attention_dropout
__UpperCamelCase =activation_dropout
__UpperCamelCase =activation_function
__UpperCamelCase =init_std
__UpperCamelCase =encoder_layerdrop
__UpperCamelCase =decoder_layerdrop
__UpperCamelCase =classifier_dropout
__UpperCamelCase =use_cache
__UpperCamelCase =encoder_layers
__UpperCamelCase =scale_embedding # scale factor will be sqrt(d_model) if True
__UpperCamelCase =use_prompt
__UpperCamelCase =prompt_length
__UpperCamelCase =prompt_mid_dim
super().__init__(
pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , is_encoder_decoder=A_ , decoder_start_token_id=A_ , forced_eos_token_id=A_ , **A_ , )
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , A_ ):
__UpperCamelCase =self.bos_token_id
warnings.warn(
f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
'The config can simply be saved and uploaded again to be fixed.' )
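# A hedged instantiation sketch for the config class above (named
# UpperCAmelCase__ here). The keyword names follow the upstream MvpConfig,
# since the parameter names in this snippet are obfuscated:
# config = UpperCAmelCase__()                                    # 12-layer encoder/decoder, d_model=1024
# small = UpperCAmelCase__(encoder_layers=2 , decoder_layers=2 , d_model=256)
# small.hidden_size == 256    # resolved through the attribute_map alias to d_model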
| 62 | 0 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''' , [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
] , )
def A (files : Tuple , tmp_path_factory : Dict ) -> Any:
"""simple docstring"""
dataset_infos_dir = tmp_path_factory.mktemp('''dset_infos_dir''' )
if "full:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''---\ndataset_info:\n dataset_size: 42\n---''' )
if "empty:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''''' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / '''dataset_infos.json''' , '''w''' ) as f:
f.write('''{"default": {"dataset_size": 42}}''' )
dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'''dataset_info''' , [
DatasetInfo(),
DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ),
] , )
def A (tmp_path : Optional[int] , dataset_info : DatasetInfo ) -> Tuple:
"""simple docstring"""
tmp_path = str(tmp_path )
dataset_info.write_to_directory(tmp_path )
reloaded = DatasetInfo.from_directory(tmp_path )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(tmp_path , '''dataset_info.json''' ) )
def A () -> Optional[int]:
"""simple docstring"""
dataset_info = DatasetInfo(
description='''foo''' , citation='''bar''' , homepage='''https://foo.bar''' , license='''CC0''' , features=Features({'''a''': Value('''int32''' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train''', '''num_examples''': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
dataset_info_yaml_dict = dataset_info._to_yaml_dict()
assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
reloaded = yaml.safe_load(dataset_info_yaml )
assert dataset_info_yaml_dict == reloaded
def A () -> Any:
"""simple docstring"""
dataset_info = DatasetInfo()
dataset_info_yaml_dict = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''' , [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=42 ),
'''v2''': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def A (tmp_path : Optional[Any] , dataset_infos_dict : DatasetInfosDict ) -> List[Any]:
"""simple docstring"""
tmp_path = str(tmp_path )
dataset_infos_dict.write_to_directory(tmp_path )
reloaded = DatasetInfosDict.from_directory(tmp_path )
# the config_name of the dataset_infos_dict takes over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
dataset_info.config_name = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(tmp_path , '''README.md''' ) )
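# A hedged usage sketch of the round-trip these tests exercise (the directory
# path is illustrative):
# info = DatasetInfo(description='''foo''' , dataset_size=42 )
# info.write_to_directory('''/tmp/dset_info''' )        # writes dataset_info.json
# assert DatasetInfo.from_directory('''/tmp/dset_info''' ) == info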
| 51 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = GPTaTokenizer
UpperCAmelCase__ : Any = GPTaTokenizerFast
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : int = {"add_prefix_space": True}
UpperCAmelCase__ : Any = False
def _a ( self ) -> Optional[int]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab =[
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
vocab_tokens =dict(zip(vocab , range(len(vocab ) ) ) )
merges =['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
self.special_tokens_map ={'unk_token': '<unk>'}
self.vocab_file =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
self.merges_file =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(vocab_tokens ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(merges ) )
def _a ( self , **A_ ) -> str:
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **A_ )
def _a ( self , **A_ ) -> Optional[Any]:
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **A_ )
def _a ( self , A_ ) -> Tuple:
input_text ='lower newer'
output_text ='lower newer'
return input_text, output_text
def _a ( self ) -> List[Any]:
tokenizer =GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
text ='lower newer'
bpe_tokens =['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
tokens =tokenizer.tokenize(text , add_prefix_space=True )
self.assertListEqual(tokens , bpe_tokens )
input_tokens =tokens + [tokenizer.unk_token]
input_bpe_tokens =[14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def _a ( self ) -> int:
if not self.test_rust_tokenizer:
return
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =self.get_rust_tokenizer(add_prefix_space=A_ )
__UpperCamelCase ='lower newer'
# Testing tokenization
__UpperCamelCase =tokenizer.tokenize(A_ , add_prefix_space=A_ )
__UpperCamelCase =rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
# Testing conversion to ids without special tokens
__UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ , add_prefix_space=A_ )
__UpperCamelCase =rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
# Testing conversion to ids with special tokens
__UpperCamelCase =self.get_rust_tokenizer(add_prefix_space=A_ )
__UpperCamelCase =tokenizer.encode(A_ , add_prefix_space=A_ )
__UpperCamelCase =rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
# Testing the unknown token
__UpperCamelCase =tokens + [rust_tokenizer.unk_token]
__UpperCamelCase =[14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A_ ) , A_ )
def _a ( self , *A_ , **A_ ) -> Optional[int]:
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def _a ( self , A_=15 ) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
# Simple input
__UpperCamelCase ='This is a simple input'
__UpperCamelCase =['This is a simple input 1', 'This is a simple input 2']
__UpperCamelCase =('This is a simple input', 'This is a pair')
__UpperCamelCase =[
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='max_length' )
# Simple input
self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='max_length' )
# Simple input
self.assertRaises(
A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='max_length' , )
# Pair input
self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='max_length' )
# Pair input
self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='max_length' )
# Pair input
self.assertRaises(
A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='max_length' , )
def _a ( self ) -> int:
__UpperCamelCase =GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
__UpperCamelCase ='This is a simple input'
__UpperCamelCase =['This is a simple input looooooooong', 'This is a simple input']
__UpperCamelCase =('This is a simple input', 'This is a pair')
__UpperCamelCase =[
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
__UpperCamelCase =tokenizer.pad_token_id
__UpperCamelCase =tokenizer(A_ , padding='max_length' , max_length=30 , return_tensors='np' )
__UpperCamelCase =tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors='np' )
__UpperCamelCase =tokenizer(*A_ , padding='max_length' , max_length=60 , return_tensors='np' )
__UpperCamelCase =tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase ='$$$'
__UpperCamelCase =GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=A_ , add_bos_token=A_ )
__UpperCamelCase ='This is a simple input'
__UpperCamelCase =['This is a simple input 1', 'This is a simple input 2']
__UpperCamelCase =tokenizer.bos_token_id
__UpperCamelCase =tokenizer(A_ )
__UpperCamelCase =tokenizer(A_ )
self.assertEqual(out_s.input_ids[0] , A_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__UpperCamelCase =tokenizer.decode(out_s.input_ids )
__UpperCamelCase =tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , A_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def _a ( self ) -> Optional[int]:
pass
def _a ( self ) -> Any:
# TODO: change to self.get_tokenizers() when the fast version is implemented
__UpperCamelCase =[self.get_tokenizer(do_lower_case=A_ , add_bos_token=A_ )]
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__UpperCamelCase ='Encode this.'
__UpperCamelCase ='This one too please.'
__UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ )
encoded_sequence += tokenizer.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode_plus(
A_ , A_ , add_special_tokens=A_ , return_special_tokens_mask=A_ , )
__UpperCamelCase =encoded_sequence_dict['input_ids']
__UpperCamelCase =encoded_sequence_dict['special_tokens_mask']
self.assertEqual(len(A_ ) , len(A_ ) )
__UpperCamelCase =[
(x if not special_tokens_mask[i] else None) for i, x in enumerate(A_ )
]
__UpperCamelCase =[x for x in filtered_sequence if x is not None]
self.assertEqual(A_ , A_ )
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Optional[Any]:
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
__UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=A_ )
__UpperCamelCase ='A photo of a cat'
__UpperCamelCase =tokenizer.encode(
A_ , )
self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('test_opt' )
__UpperCamelCase =AutoTokenizer.from_pretrained('./test_opt' )
__UpperCamelCase =tokenizer.encode(
A_ , )
self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] )
def _a ( self ) -> Dict:
__UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=A_ )
__UpperCamelCase ='A photo of a cat'
__UpperCamelCase =tokenizer.encode(
A_ , )
# Same as above
self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip('This test is failing because of a bug in the fast tokenizer' )
def _a ( self ) -> List[Any]:
__UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=A_ )
__UpperCamelCase ='bos'
__UpperCamelCase =tokenizer.get_vocab()['bos']
__UpperCamelCase ='A photo of a cat'
__UpperCamelCase =tokenizer.encode(
A_ , )
# We changed the bos token
self.assertEqual(A_ , [31957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('./tok' )
__UpperCamelCase =AutoTokenizer.from_pretrained('./tok' )
self.assertTrue(tokenizer.is_fast )
__UpperCamelCase =tokenizer.encode(
A_ , )
self.assertEqual(A_ , [31957, 250, 1345, 9, 10, 4758] )
| 62 | 0 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=24 , A_=2 , A_=6 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=None , A_=1000 , ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = parent
UpperCamelCase : List[Any] = batch_size
UpperCamelCase : Dict = seq_length
UpperCamelCase : Tuple = is_training
UpperCamelCase : Union[str, Any] = use_input_mask
UpperCamelCase : Tuple = use_token_type_ids
UpperCamelCase : Optional[Any] = use_labels
UpperCamelCase : str = vocab_size
UpperCamelCase : Optional[int] = hidden_size
UpperCamelCase : Any = num_hidden_layers
UpperCamelCase : Optional[Any] = num_attention_heads
UpperCamelCase : Optional[Any] = intermediate_size
UpperCamelCase : Optional[Any] = hidden_act
UpperCamelCase : Union[str, Any] = hidden_dropout_prob
UpperCamelCase : Union[str, Any] = attention_probs_dropout_prob
UpperCamelCase : List[Any] = max_position_embeddings
UpperCamelCase : str = type_vocab_size
UpperCamelCase : Optional[int] = type_sequence_label_size
UpperCamelCase : Dict = initializer_range
UpperCamelCase : int = num_labels
UpperCamelCase : Optional[int] = scope
UpperCamelCase : int = range_bbox
def __UpperCamelCase( self ):
'''simple docstring'''
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
t = bbox[i, j, 3]
bbox[i, j, 3] = bbox[i, j, 1]
bbox[i, j, 1] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
t = bbox[i, j, 2]
bbox[i, j, 2] = bbox[i, j, 0]
bbox[i, j, 0] = t
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
sequence_labels = None
token_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
config = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def __UpperCamelCase( self ):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
UpperCamelCase : Any = LiltModel(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : str = model(A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ )
UpperCamelCase : Optional[int] = model(A_ , bbox=A_ , token_type_ids=A_ )
UpperCamelCase : Any = model(A_ , bbox=A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
UpperCamelCase : Any = self.num_labels
UpperCamelCase : Dict = LiltForTokenClassification(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : Dict = model(
A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
UpperCamelCase : Dict = LiltForQuestionAnswering(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : List[str] = model(
A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase( self ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class A__ ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
_UpperCAmelCase :Union[str, Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCAmelCase :Optional[Any] = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase :Dict = False
_UpperCAmelCase :Union[str, Any] = False
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
return True
def __UpperCamelCase( self ):
'''simple docstring'''
self.model_tester = LiltModelTester(self )
self.config_tester = ConfigTester(self , config_class=LiltConfig , hidden_size=37 )
def __UpperCamelCase( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCamelCase( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def __UpperCamelCase( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs )
def __UpperCamelCase( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
def __UpperCamelCase( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = LiltModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@require_torch
@slow
class A__ ( unittest.TestCase ):
def __UpperCamelCase( self ):
'''simple docstring'''
model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(torch_device )
input_ids = torch.tensor([[1, 2]] , device=torch_device )
bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=torch_device )
# forward pass
with torch.no_grad():
outputs = model(input_ids=input_ids , bbox=bbox )
expected_shape = torch.Size([1, 2, 768] )
expected_slice = torch.tensor(
[[-0.06_53, 0.09_50, -0.00_61], [-0.05_45, 0.09_26, -0.03_24]] , device=torch_device , )
self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , expected_slice , atol=1e-3 ) )
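# A standalone sketch of the bounding-box "legalisation" performed in
# prepare_config_and_inputs above: coordinates are swapped so that
# x0 <= x1 and y0 <= y1 for every box. The sample box is made up.
def _legalize_bbox(box):
    x0, y0, x1, y1 = box
    return [min(x0, x1), min(y0, y1), max(x0, x1), max(y0, y1)]

assert _legalize_bbox([7, 9, 3, 2]) == [3, 2, 7, 9]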
| 52 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ = None ) -> None:
if components is None:
__UpperCamelCase =[]
__UpperCamelCase =list(A_ )
def __len__( self ) -> int:
return len(self.__components )
def __str__( self ) -> str:
return "(" + ",".join(map(A_ , self.__components ) ) + ")"
def __add__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] + other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else:
raise Exception('must have the same size' )
def __sub__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] - other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self , A_ ) -> Vector:
...
@overload
def __mul__( self , A_ ) -> float:
...
def __mul__( self , A_ ) -> float | Vector:
if isinstance(A_ , (float, int) ):
__UpperCamelCase =[c * other for c in self.__components]
return Vector(A_ )
elif isinstance(A_ , A_ ) and len(self ) == len(A_ ):
__UpperCamelCase =len(self )
__UpperCamelCase =[self.__components[i] * other.component(A_ ) for i in range(A_ )]
return sum(A_ )
else: # error case
raise Exception('invalid operand!' )
def _a ( self ) -> Vector:
return Vector(self.__components )
def _a ( self , A_ ) -> float:
if isinstance(A_ , A_ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def _a ( self , A_ , A_ ) -> None:
assert -len(self.__components ) <= pos < len(self.__components )
__UpperCamelCase =value
def _a ( self ) -> float:
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
__UpperCamelCase =[c**2 for c in self.__components]
return math.sqrt(sum(A_ ) )
def _a ( self , A_ , A_ = False ) -> float:
__UpperCamelCase =self * other
__UpperCamelCase =self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
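# A standalone check of the angle formula implemented in the method above:
# angle(v, w) = arccos((v . w) / (|v| |w|)); plain lists, no Vector needed.
_v, _w = [1.0, 0.0], [0.0, 1.0]
_dot = sum(a * b for a, b in zip(_v, _w))
_norms = math.sqrt(sum(a * a for a in _v)) * math.sqrt(sum(b * b for b in _w))
assert abs(math.degrees(math.acos(_dot / _norms)) - 90.0) < 1e-9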
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return Vector([0] * dimension )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ))
__UpperCamelCase =[0] * dimension
__UpperCamelCase =1
return Vector(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Vector , SCREAMING_SNAKE_CASE__ : Vector ):
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and (isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ))
)
return x * scalar + y
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
random.seed(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )]
return Vector(SCREAMING_SNAKE_CASE__ )
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_ , A_ ) -> None:
__UpperCamelCase =matrix
__UpperCamelCase =w
__UpperCamelCase =h
def __str__( self ) -> str:
__UpperCamelCase =''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] + other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
raise Exception('matrix must have the same dimension!' )
def __sub__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] - other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self , A_ ) -> Matrix:
...
@overload
def __mul__( self , A_ ) -> Vector:
...
def __mul__( self , A_ ) -> Vector | Matrix:
if isinstance(A_ , A_ ): # matrix-vector
if len(A_ ) == self.__width:
__UpperCamelCase =zero_vector(self.__height )
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] * other.component(A_ )
for j in range(self.__width )
]
ans.change_component(A_ , sum(A_ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(A_ , (int, float) ): # matrix-scalar
__UpperCamelCase =[
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(A_ , self.__width , self.__height )
return None
def _a ( self ) -> int:
return self.__height
def _a ( self ) -> int:
return self.__width
def _a ( self , A_ , A_ ) -> float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('component: indices out of bounds' )
def _a ( self , A_ , A_ , A_ ) -> None:
if 0 <= x < self.__height and 0 <= y < self.__width:
__UpperCamelCase =value
else:
raise Exception('change_component: indices out of bounds' )
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
__UpperCamelCase =self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(A_ ) ):
__UpperCamelCase =minor[i][:y] + minor[i][y + 1 :]
return Matrix(A_ , self.__width - 1 , self.__height - 1 ).determinant()
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(A_ , A_ )
else:
raise Exception('Indices out of bounds' )
def _a ( self ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__UpperCamelCase =[
self.__matrix[0][y] * self.cofactor(0 , A_ ) for y in range(self.__width )
]
return sum(A_ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =[[0] * n for _ in range(SCREAMING_SNAKE_CASE__ )]
return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
random.seed(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[
[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ )
]
return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
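# A hand-checked instance of the 2x2 base case in Matrix.determinant above:
# det([[1, 2], [3, 4]]) = 1*4 - 2*3 = -2 (plain lists, no Matrix needed).
_m = [[1, 2], [3, 4]]
assert _m[0][0] * _m[1][1] - _m[0][1] * _m[1][0] == -2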
| 62 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a__ : List[str] =logging.get_logger(__name__)
def squared_euclidean_distance(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Pairwise squared Euclidean distances between the rows of a and the rows of b."""
    b = b.T
    aa = np.sum(np.square(a), axis=1)
    ba = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d


def color_quantize(x: np.ndarray, clusters: np.ndarray) -> np.ndarray:
    """Map each RGB pixel in x to the index of its nearest cluster."""
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] =["pixel_values"]
def __init__( self : Union[str, Any] , __A : Optional[Union[List[List[int]], np.ndarray]] = None , __A : bool = True , __A : Dict[str, int] = None , __A : PILImageResampling = PILImageResampling.BILINEAR , __A : bool = True , __A : bool = True , **__A : List[str] , ):
super().__init__(**__A )
__UpperCamelCase = size if size is not None else {'height': 2_5_6, 'width': 2_5_6}
__UpperCamelCase = get_size_dict(__A )
__UpperCamelCase = np.array(__A ) if clusters is not None else None
__UpperCamelCase = do_resize
__UpperCamelCase = size
__UpperCamelCase = resample
__UpperCamelCase = do_normalize
__UpperCamelCase = do_color_quantize
def _lowerCamelCase ( self : Optional[int] , __A : np.ndarray , __A : Dict[str, int] , __A : PILImageResampling = PILImageResampling.BILINEAR , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Union[str, Any] , ):
__UpperCamelCase = get_size_dict(__A )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
__A , size=(size['height'], size['width']) , resample=__A , data_format=__A , **__A )
def _lowerCamelCase ( self : Dict , __A : np.ndarray , __A : Optional[Union[str, ChannelDimension]] = None , ):
__UpperCamelCase = rescale(image=__A , scale=1 / 127.5 , data_format=__A )
__UpperCamelCase = image - 1
return image
def _lowerCamelCase ( self : Optional[Any] , __A : ImageInput , __A : bool = None , __A : Dict[str, int] = None , __A : PILImageResampling = None , __A : bool = None , __A : Optional[bool] = None , __A : Optional[Union[List[List[int]], np.ndarray]] = None , __A : Optional[Union[str, TensorType]] = None , __A : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **__A : Optional[int] , ):
__UpperCamelCase = do_resize if do_resize is not None else self.do_resize
__UpperCamelCase = size if size is not None else self.size
__UpperCamelCase = get_size_dict(__A )
__UpperCamelCase = resample if resample is not None else self.resample
__UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
__UpperCamelCase = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
__UpperCamelCase = clusters if clusters is not None else self.clusters
__UpperCamelCase = np.array(__A )
__UpperCamelCase = make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.' )
# All transformations expect numpy arrays.
__UpperCamelCase = [to_numpy_array(__A ) for image in images]
if do_resize:
__UpperCamelCase = [self.resize(image=__A , size=__A , resample=__A ) for image in images]
if do_normalize:
__UpperCamelCase = [self.normalize(image=__A ) for image in images]
if do_color_quantize:
__UpperCamelCase = [to_channel_dimension_format(__A , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
__UpperCamelCase = np.array(__A )
__UpperCamelCase = color_quantize(__A , __A ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
__UpperCamelCase = images.shape[0]
__UpperCamelCase = images.reshape(__A , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
__UpperCamelCase = list(__A )
else:
__UpperCamelCase = [to_channel_dimension_format(__A , __A ) for image in images]
__UpperCamelCase = {'input_ids': images}
return BatchFeature(data=__A , tensor_type=__A )
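# Example usage of the two helpers at the top of this module, on made-up
# values: quantise three RGB pixels onto a toy two-colour palette.
_pixels = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [0.9, 0.9, 0.9]])
_palette = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
assert color_quantize(_pixels, _palette).tolist() == [0, 1, 1]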
| 53 |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Return vertices reachable from `vert` in order of increasing DFS finish time."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: DFS ordering on `graph`, then DFS passes on the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
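# Example run on the sample graphs above: in test_graph_1 the vertices
# 0 -> 2 -> 1 -> 0 form a cycle, so the strongly connected components are
# {0, 1, 2}, {3} and {4} (output order as produced by this reconstruction).
assert strongly_connected_components(test_graph_1) == [[0, 1, 2], [3], [4]]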
| 62 | 0 |
"""simple docstring"""
def twos_complement(number: int) -> str:
    """Return the minimal-width two's complement of a non-positive integer as a binary string."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
_A = '▁'
_A = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
_A = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
_A = {'vinai/bartpho-syllable': 1024}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Any = VOCAB_FILES_NAMES
UpperCAmelCase__ : int = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : str = ["input_ids", "attention_mask"]
def __init__( self , A_ , A_ , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_ = None , **A_ , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
__UpperCamelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
__UpperCamelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
__UpperCamelCase =vocab_file
__UpperCamelCase =monolingual_vocab_file
__UpperCamelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A_ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
__UpperCamelCase ={}
__UpperCamelCase =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(A_ ) not in self.fairseq_tokens_to_ids:
__UpperCamelCase =cnt
cnt += 1
with open(A_ , 'r' , encoding='utf-8' ) as f:
for line in f.readlines():
__UpperCamelCase =line.strip().split()[0]
__UpperCamelCase =len(self.fairseq_tokens_to_ids )
if str(A_ ) not in self.fairseq_tokens_to_ids:
__UpperCamelCase =len(self.fairseq_tokens_to_ids )
__UpperCamelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Any:
__UpperCamelCase =self.__dict__.copy()
__UpperCamelCase =None
__UpperCamelCase =self.sp_model.serialized_model_proto()
return state
def __setstate__( self , A_ ) -> List[str]:
__UpperCamelCase =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase ={}
__UpperCamelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _a ( self , A_ , A_ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCamelCase =[self.cls_token_id]
__UpperCamelCase =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _a ( self , A_ , A_ = None , A_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def _a ( self , A_ , A_ = None ) -> List[int]:
__UpperCamelCase =[self.sep_token_id]
__UpperCamelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _a ( self ) -> Any:
return len(self.fairseq_ids_to_tokens )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase ={self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _a ( self , A_ ) -> List[str]:
return self.sp_model.encode(A_ , out_type=A_ )
def _a ( self , A_ ) -> str:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def _a ( self , A_ ) -> int:
return self.fairseq_ids_to_tokens[index]
def _a ( self , A_ ) -> List[Any]:
__UpperCamelCase =''.join(A_ ).replace(A_ , ' ' ).strip()
return out_string
def _a ( self , A_ , A_ = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCamelCase =os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase =os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ , 'wb' ) as fi:
__UpperCamelCase =self.sp_model.serialized_model_proto()
fi.write(A_ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
A_ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , A_ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(A_ , 'w' , encoding='utf-8' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'{str(A_ )} \n' )
return out_vocab_file, out_monolingual_vocab_file
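# A plain sketch of build_inputs_with_special_tokens above for a single
# sequence: <s> tokens </s>. The ids are placeholders, not the real vocab.
_cls_id, _sep_id = 0, 2
assert [_cls_id] + [11, 22, 33] + [_sep_id] == [0, 11, 22, 33, 2]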
| 62 | 0 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """A Bezier curve defined as a weighted sum of a set of control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
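    # Evaluating a curve point without plotting: the quadratic Bezier through
    # (0, 0), (5, 5) and (5, 0) passes through (3.75, 2.5) at t = 0.5.
    assert BezierCurve([(0, 0), (5, 5), (5, 0)]).bezier_curve_function(0.5) == (3.75, 2.5)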
| 55 |
from numpy import exp, pi, sqrt
def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
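    # Sanity check for the density above: at the mean of a standard normal,
    # gaussian(0) = 1 / sqrt(2 * pi), roughly 0.3989.
    assert abs(gaussian(0.0) - 1 / sqrt(2 * pi)) < 1e-12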
| 62 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a : Tuple = {
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = ['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = ['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = [
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
a : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
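# A minimal sketch of the lazy-import pattern that _LazyModule implements
# above, with a toy stand-in (names here are illustrative, not the
# transformers internals): the real import runs only on first access.
import importlib

class _ToyLazyAttr:
    def __init__(self, module_name, attr_name):
        self._module_name, self._attr_name, self._obj = module_name, attr_name, None

    def resolve(self):
        if self._obj is None:  # import happens once, on first use
            self._obj = getattr(importlib.import_module(self._module_name), self._attr_name)
        return self._obj

assert _ToyLazyAttr("math", "sqrt").resolve()(4) == 2.0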
| 56 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_A = logging.get_logger(__name__)
if is_vision_available():
import PIL
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = ["pixel_values"]
def __init__( self , A_ = True , A_ = None , A_ = PILImageResampling.BICUBIC , A_ = True , A_ = None , A_ = True , A_ = 1 / 255 , A_ = True , A_ = None , A_ = None , A_ = True , **A_ , ) -> None:
super().__init__(**A_ )
__UpperCamelCase =size if size is not None else {'shortest_edge': 224}
__UpperCamelCase =get_size_dict(A_ , default_to_square=A_ )
__UpperCamelCase =crop_size if crop_size is not None else {'height': 224, 'width': 224}
__UpperCamelCase =get_size_dict(A_ , default_to_square=A_ , param_name='crop_size' )
__UpperCamelCase =do_resize
__UpperCamelCase =size
__UpperCamelCase =resample
__UpperCamelCase =do_center_crop
__UpperCamelCase =crop_size
__UpperCamelCase =do_rescale
__UpperCamelCase =rescale_factor
__UpperCamelCase =do_normalize
__UpperCamelCase =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__UpperCamelCase =image_std if image_std is not None else OPENAI_CLIP_STD
__UpperCamelCase =do_convert_rgb
def _a ( self , A_ , A_ , A_ = PILImageResampling.BICUBIC , A_ = None , **A_ , ) -> np.ndarray:
__UpperCamelCase =get_size_dict(A_ , default_to_square=A_ )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
__UpperCamelCase =get_resize_output_image_size(A_ , size=size['shortest_edge'] , default_to_square=A_ )
return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ )
def _a ( self , A_ , A_ , A_ = None , **A_ , ) -> np.ndarray:
__UpperCamelCase =get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(A_ , size=(size['height'], size['width']) , data_format=A_ , **A_ )
def _a ( self , A_ , A_ , A_ = None , **A_ , ) -> Union[str, Any]:
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def _a ( self , A_ , A_ , A_ , A_ = None , **A_ , ) -> np.ndarray:
return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )
def _a ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ) -> PIL.Image.Image:
__UpperCamelCase =do_resize if do_resize is not None else self.do_resize
__UpperCamelCase =size if size is not None else self.size
__UpperCamelCase =get_size_dict(A_ , param_name='size' , default_to_square=A_ )
__UpperCamelCase =resample if resample is not None else self.resample
__UpperCamelCase =do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCamelCase =crop_size if crop_size is not None else self.crop_size
__UpperCamelCase =get_size_dict(A_ , param_name='crop_size' , default_to_square=A_ )
__UpperCamelCase =do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase =rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase =do_normalize if do_normalize is not None else self.do_normalize
__UpperCamelCase =image_mean if image_mean is not None else self.image_mean
__UpperCamelCase =image_std if image_std is not None else self.image_std
__UpperCamelCase =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__UpperCamelCase =make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__UpperCamelCase =[convert_to_rgb(A_ ) for image in images]
# All transformations expect numpy arrays.
__UpperCamelCase =[to_numpy_array(A_ ) for image in images]
if do_resize:
__UpperCamelCase =[self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
if do_center_crop:
__UpperCamelCase =[self.center_crop(image=A_ , size=A_ ) for image in images]
if do_rescale:
__UpperCamelCase =[self.rescale(image=A_ , scale=A_ ) for image in images]
if do_normalize:
__UpperCamelCase =[self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images]
__UpperCamelCase =[to_channel_dimension_format(A_ , A_ ) for image in images]
__UpperCamelCase ={'pixel_values': images}
return BatchFeature(data=A_ , tensor_type=A_ )
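# A framework-free sketch of the rescale -> normalize steps above, with
# made-up channel statistics: pixel values in [0, 255] are scaled by 1/255
# and then standardised per channel.
_demo_image = np.full((3, 2, 2), 255, dtype=np.float32)  # channels-first toy image
_demo_mean = np.array([0.5, 0.5, 0.5], dtype=np.float32)[:, None, None]
_demo_std = np.array([0.5, 0.5, 0.5], dtype=np.float32)[:, None, None]
assert np.allclose((_demo_image * (1 / 255) - _demo_mean) / _demo_std, 1.0)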
| 62 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
A : Dict = None
A : List[Any] = logging.get_logger(__name__)
A : int = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
A : Union[str, Any] = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
A : List[str] = {
"facebook/nllb-large-en-ro": 1_0_2_4,
"facebook/nllb-200-distilled-600M": 1_0_2_4,
}
# fmt: off
A : Optional[int] = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : List[Any] =VOCAB_FILES_NAMES
__UpperCAmelCase : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Any =["""input_ids""", """attention_mask"""]
__UpperCAmelCase : str =NllbTokenizer
__UpperCAmelCase : List[int] =[]
__UpperCAmelCase : List[int] =[]
def __init__( self , __a=None , __a=None , __a="<s>" , __a="</s>" , __a="</s>" , __a="<s>" , __a="<unk>" , __a="<pad>" , __a="<mask>" , __a=None , __a=None , __a=None , __a=False , **__a , ):
# Mask token behave like a normal word, i.e. include the space before it
__lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
__lowerCAmelCase = legacy_behaviour
super().__init__(
vocab_file=__a , tokenizer_file=__a , bos_token=__a , eos_token=__a , sep_token=__a , cls_token=__a , unk_token=__a , pad_token=__a , mask_token=__a , src_lang=__a , tgt_lang=__a , additional_special_tokens=__a , legacy_behaviour=__a , **__a , )
__lowerCAmelCase = vocab_file
__lowerCAmelCase = False if not self.vocab_file else True
__lowerCAmelCase = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
__lowerCAmelCase = {
lang_code: self.convert_tokens_to_ids(__a ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__lowerCAmelCase = src_lang if src_lang is not None else "eng_Latn"
__lowerCAmelCase = self.convert_tokens_to_ids(self._src_lang )
__lowerCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def snake_case ( self ):
return self._src_lang
@src_lang.setter
def snake_case ( self , __a ):
__lowerCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def snake_case ( self , __a , __a = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def snake_case ( self , __a , __a = None ):
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case ( self , __a , __a , __a , __a , **__a ):
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
__lowerCAmelCase = src_lang
__lowerCAmelCase = self(__a , add_special_tokens=__a , return_tensors=__a , **__a )
__lowerCAmelCase = self.convert_tokens_to_ids(__a )
__lowerCAmelCase = tgt_lang_id
return inputs
def snake_case ( self , __a , __a = "eng_Latn" , __a = None , __a = "fra_Latn" , **__a , ):
__lowerCAmelCase = src_lang
__lowerCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(__a , __a , **__a )
def snake_case ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def snake_case ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def snake_case ( self , __a ):
__lowerCAmelCase = self.convert_tokens_to_ids(__a )
if self.legacy_behaviour:
__lowerCAmelCase = []
__lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
else:
__lowerCAmelCase = [self.cur_lang_code]
__lowerCAmelCase = [self.eos_token_id]
__lowerCAmelCase = self.convert_ids_to_tokens(self.prefix_tokens )
__lowerCAmelCase = self.convert_ids_to_tokens(self.suffix_tokens )
__lowerCAmelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def snake_case ( self , __a ):
__lowerCAmelCase = self.convert_tokens_to_ids(__a )
if self.legacy_behaviour:
__lowerCAmelCase = []
__lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
else:
__lowerCAmelCase = [self.cur_lang_code]
__lowerCAmelCase = [self.eos_token_id]
__lowerCAmelCase = self.convert_ids_to_tokens(self.prefix_tokens )
__lowerCAmelCase = self.convert_ids_to_tokens(self.suffix_tokens )
__lowerCAmelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def snake_case ( self , __a , __a = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__a ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory." )
return
__lowerCAmelCase = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ):
copyfile(self.vocab_file , __a )
return (out_vocab_file,)
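# A plain-Python sketch of the prefix/suffix logic configured in
# set_src_lang_special_tokens above (token strings are placeholders):
# the legacy behaviour appends the language code after </s>, the current
# behaviour prepends it before the tokens.
def _wrap(tokens, lang_code, eos="</s>", legacy=False):
    if legacy:
        return tokens + [eos, lang_code]
    return [lang_code] + tokens + [eos]

assert _wrap(["Hello"], "eng_Latn") == ["eng_Latn", "Hello", "</s>"]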
| 57 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = "yolos"
def __init__( self , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.0 , A_=0.0 , A_=0.02 , A_=1E-12 , A_=[512, 864] , A_=16 , A_=3 , A_=True , A_=100 , A_=True , A_=False , A_=1 , A_=5 , A_=2 , A_=5 , A_=2 , A_=0.1 , **A_ , ) -> Any:
super().__init__(**A_ )
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =initializer_range
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =image_size
__UpperCamelCase =patch_size
__UpperCamelCase =num_channels
__UpperCamelCase =qkv_bias
__UpperCamelCase =num_detection_tokens
__UpperCamelCase =use_mid_position_embeddings
__UpperCamelCase =auxiliary_loss
# Hungarian matcher
__UpperCamelCase =class_cost
__UpperCamelCase =bbox_cost
__UpperCamelCase =giou_cost
# Loss coefficients
__UpperCamelCase =bbox_loss_coefficient
__UpperCamelCase =giou_loss_coefficient
__UpperCamelCase =eos_coefficient
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : str = version.parse("1.11" )
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _a ( self ) -> float:
return 1E-4
@property
def _a ( self ) -> int:
return 12
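# A toy illustration of how the matcher weights stored on the config above
# (class_cost, bbox_cost, giou_cost) combine linearly into one assignment
# cost; the numbers are invented and this is not the real Hungarian matcher.
_class_cost, _bbox_cost, _giou_cost = 1, 5, 2
_total = _class_cost * 0.3 + _bbox_cost * 0.1 + _giou_cost * 0.2
assert round(_total, 2) == 1.2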
| 62 | 0 |
'''simple docstring'''
lowercase_ = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
lowercase_ = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
lowercase_ = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
lowercase_ = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
lowercase_ = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
lowercase_ = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
lowercase_ = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
lowercase_ = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
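# The hand-tuned schedules above are not uniform; a simple evenly spaced
# variant can be generated like this (illustrative helper, not part of the
# original module):
def _even_schedule(num_steps, max_timestep=999):
    return [round(max_timestep * i / (num_steps - 1)) for i in range(num_steps - 1, -1, -1)]

assert _even_schedule(3) == [999, 500, 0]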
| 58 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_A = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class UpperCAmelCase ( A_ ):
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case : Optional[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(snake_case__ , "tf_padding" ) )
self.parent.assertTrue(hasattr(snake_case__ , "depth_multiplier" ) )
class MobileNetVaModelTester:
    def __init__(self , parent , batch_size=13 , num_channels=3 , image_size=32 , depth_multiplier=0.25 , min_depth=8 , tf_padding=True , last_hidden_size=10_24 , output_stride=32 , hidden_act="relu6" , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=10 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier )
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self ):
        return MobileNetVaConfig(
            num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )

    def create_and_check_model(self , config , pixel_values , labels , pixel_labels ):
        model = MobileNetVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def create_and_check_for_image_classification(self , config , pixel_values , labels , pixel_labels ):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self ):
        self.model_tester = MobileNetVaModelTester(self )
        self.config_tester = MobileNetVaConfigTester(self , config_class=MobileNetVaConfig , has_text_modality=False )

    def test_config(self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds" )
    def test_inputs_embeds(self ):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings" )
    def test_model_common_attributes(self ):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions" )
    def test_attention_outputs(self ):
        pass

    def test_forward_signature(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output(self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 26
            self.assertEqual(len(hidden_states ) , expected_num_stages )

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )

    def test_for_image_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self ):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image


@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor(self ):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224" ) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self ):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 10_01) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 59 |
from __future__ import annotations
import math
class SegmentTree:
    """Range-max segment tree with lazy propagation (nodes are 1-indexed)."""

    def __init__(self , size: int ) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0 , 4 * size )]
        # create array to store lazy update
        self.lazy = [0 for i in range(0 , 4 * size )]
        self.flag = [0 for i in range(0 , 4 * size )]  # flag for lazy update

    def left(self , idx: int ) -> int:
        return idx * 2

    def right(self , idx: int ) -> int:
        return idx * 2 + 1

    def build(self , idx: int , left_element: int , right_element: int , a: list[int] ) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx ) , left_element , mid , a )
            self.build(self.right(idx ) , mid + 1 , right_element , a )
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )
    def update(self , idx: int , left_element: int , right_element: int , a: int , b: int , val: int ) -> bool:
        """Assign `val` to every position in [a, b], pushing pending updates down lazily."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx )] = val
                self.lazy[self.right(idx )] = val
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx ) , left_element , mid , a , b , val )
        self.update(self.right(idx ) , mid + 1 , right_element , a , b , val )
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )
        return True
    def query(self , idx: int , left_element: int , right_element: int , a: int , b: int ) -> int | float:
        """Return the maximum over positions [a, b]."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx ) , left_element , mid , a , b )
        q2 = self.query(self.right(idx ) , mid + 1 , right_element , a , b )
        return max(q1 , q2 )

    def __str__( self ) -> str:
        return str([self.query(1 , 1 , self.size , i , i ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
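    # Editor's addition (not in the original snippet): with lazy propagation both
    # `update` and `query` run in O(log size), since child updates are deferred
    # until a path is actually visited. A quick sanity check on the demo above -
    # positions 4..6 are untouched by the two range updates, so their max is still 7:
    assert segt.query(1, 1, size, 4, 6) == 7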
| 62 | 0 |
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class TextInpainting( DiffusionPipeline ):
    def __init__( self , segmentation_model: CLIPSegForImageSegmentation , segmentation_processor: CLIPSegProcessor , vae: AutoencoderKL , text_encoder: CLIPTextModel , tokenizer: CLIPTokenizer , unet: UNetaDConditionModel , scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker: StableDiffusionSafetyChecker , feature_extractor: CLIPImageProcessor , ):
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
            deprecation_message = (
F'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
F''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
'''to update the config accordingly as leaving `steps_offset` might led to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
            deprecate('''steps_offset!=1''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
F'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
''' Hub, it would be very nice if you could open a Pull request for the'''
''' `scheduler/scheduler_config.json` file'''
)
            deprecate('''skip_prk_steps not set''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config )
if safety_checker is None:
logger.warning(
F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
        self.register_modules(
            segmentation_model=segmentation_model , segmentation_processor=segmentation_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )

    def enable_attention_slicing( self , slice_size: Optional[Union[str, int]] = "auto" ):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )

    def disable_attention_slicing( self ):
        # setting `slice_size` to `None` disables attention slicing again
        self.enable_attention_slicing(None )
    def enable_sequential_cpu_offload( self ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        device = torch.device('''cuda''' )
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
        if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , '''_hf_hook''' )
                and hasattr(module._hf_hook , '''execution_device''' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
@torch.no_grad()
    def __call__( self , prompt: Union[str, List[str]] , image: Union[torch.FloatTensor, PIL.Image.Image] , text: str , height: int = 5_1_2 , width: int = 5_1_2 , num_inference_steps: int = 5_0 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ):
        # predict a segmentation mask for the region described by `text` with CLIPSeg
        inputs = self.segmentation_processor(
            text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
        outputs = self.segmentation_model(**inputs )
        mask = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        mask_pil = self.numpy_to_pil(mask )[0].resize(image.size )
        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=prompt , image=image , mask_image=mask_pil , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , )
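# Usage sketch (editor's addition, not part of the original pipeline file). The
# checkpoint names below are assumptions; swap in whichever CLIPSeg and Stable
# Diffusion inpainting weights you actually have:
#
#   from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
#   from diffusers import StableDiffusionInpaintPipeline
#
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
#   inpaint = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
#
# The class above is then constructed from these components and called as
# pipeline(prompt="a red sofa", image=init_image, text="the old couch"), where
# `text` selects the region to repaint and `prompt` describes its replacement.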
| 60 |
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "" ) -> dict[str, float]:
    url = url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
    soup = BeautifulSoup(requests.get(url ).text , 'html.parser' )
    titles = soup.find_all('td' , attrs='titleColumn' )
    ratings = soup.find_all('td' , class_='ratingColumn imdbRating' )
    return {
        title.a.text: float(rating.strong.text )
        for title, rating in zip(titles , ratings )
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv" ) -> None:
    movies = get_imdb_top_250_movies()
    with open(filename , 'w' , newline='' ) as out_file:
        writer = csv.writer(out_file )
        writer.writerow(['Movie title', 'IMDb rating'] )
        for title, rating in movies.items():
            writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 62 | 0 |
"""simple docstring"""
# Imports
import numpy as np
class IndexCalculation:
    """Compute vegetation indices (NDVI, EVI, ...) from per-band numpy arrays."""

    def __init__( self , red=None , green=None , blue=None , red_edge=None , nir=None ):
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )

    def set_matricies( self , red=None , green=None , blue=None , red_edge=None , nir=None ):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation( self , index="" , red=None , green=None , blue=None , red_edge=None , nir=None ):
        """Dispatch to the index function named by `index` after refreshing the bands."""
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )
        funcs = {
"ARVI2": self.arvaa,
"CCCI": self.ccci,
"CVI": self.cvi,
"GLI": self.gli,
"NDVI": self.ndvi,
"BNDVI": self.bndvi,
"redEdgeNDVI": self.red_edge_ndvi,
"GNDVI": self.gndvi,
"GBNDVI": self.gbndvi,
"GRNDVI": self.grndvi,
"RBNDVI": self.rbndvi,
"PNDVI": self.pndvi,
"ATSAVI": self.atsavi,
"BWDRVI": self.bwdrvi,
"CIgreen": self.ci_green,
"CIrededge": self.ci_rededge,
"CI": self.ci,
"CTVI": self.ctvi,
"GDVI": self.gdvi,
"EVI": self.evi,
"GEMI": self.gemi,
"GOSAVI": self.gosavi,
"GSAVI": self.gsavi,
"Hue": self.hue,
"IVI": self.ivi,
"IPVI": self.ipvi,
"I": self.i,
"RVI": self.rvi,
"MRVI": self.mrvi,
"MSAVI": self.m_savi,
"NormG": self.norm_g,
"NormNIR": self.norm_nir,
"NormR": self.norm_r,
"NGRDI": self.ngrdi,
"RI": self.ri,
"S": self.s,
"IF": self._if,
"DVI": self.dvi,
"TVI": self.tvi,
"NDRE": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("Index not in the list!" )
return False
    def arvaa(self ):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self ):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self ):
        return self.nir * (self.red / (self.green**2))

    def gli(self ):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self ):
        return (self.nir - self.red) / (self.nir + self.red)
    def bndvi(self ):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self ):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self ):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self ):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self ):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self ):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self ):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )
    def atsavi(self , x=0.08 , a=1.22 , b=0.03 ):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self ):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self ):
        return (self.nir / self.green) - 1

    def ci_rededge(self ):
        return (self.nir / self.redEdge) - 1

    def ci(self ):
        return (self.red - self.blue) / self.red
    def ctvi(self ):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))

    def gdvi(self ):
        return self.nir - self.green

    def evi(self ):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self ):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
    def gosavi(self , y=0.16 ):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self , n=0.5 ):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self ):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )

    def ivi(self , a=None , b=None ):
        return (self.nir - b) / (a * self.red)

    def ipvi(self ):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
    def i(self ):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self ):
        return self.nir / self.red

    def mrvi(self ):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self ):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2
    def norm_g(self ):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self ):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self ):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self ):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self ):
        return (self.red - self.green) / (self.red + self.green)

    def s(self ):
        max_value = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
        min_value = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
        return (max_value - min_value) / max_value
    def _if(self ):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self ):
        return self.nir / self.red

    def tvi(self ):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self ):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
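# Usage sketch (editor's addition, not part of the original class). The band
# arguments are equally-shaped numpy arrays; the values below are made up:
#
#   red = np.array([[50.0, 60.0], [70.0, 80.0]])
#   nir = np.array([[100.0, 120.0], [140.0, 160.0]])
#   cl = IndexCalculation(red=red, nir=nir)
#   print(cl.calculation("NDVI", red=red, nir=nir))  # (nir - red) / (nir + red)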
| 61 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class InstructBlipVisionConfig(PretrainedConfig ):
    model_type = "instructblip_vision_model"

    def __init__( self , hidden_size=1408 , intermediate_size=6144 , num_hidden_layers=39 , num_attention_heads=16 , image_size=224 , patch_size=14 , hidden_act="gelu" , layer_norm_eps=1E-6 , attention_dropout=0.0 , initializer_range=1E-10 , qkv_bias=True , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type' ) == "instructblip":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class InstructBlipQFormerConfig(PretrainedConfig ):
    model_type = "instructblip_qformer"

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1408 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type' ) == "instructblip":
            config_dict = config_dict['qformer_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class InstructBlipConfig(PretrainedConfig ):
    model_type = "instructblip"
    is_composition = True

    def __init__( self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=32 , **kwargs ):
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
        self.vision_config = InstructBlipVisionConfig(**vision_config )
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config )
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs( cls , vision_config , qformer_config , text_config , **kwargs , ):
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )

    def to_dict(self ):
        output = copy.deepcopy(self.__dict__ )
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
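# Usage sketch (editor's addition): composing the full config from sub-configs,
# mirroring the classmethod above.
#
#   vision_config = InstructBlipVisionConfig()
#   qformer_config = InstructBlipQFormerConfig()
#   text_config = CONFIG_MAPPING["opt"]()
#   config = InstructBlipConfig.from_vision_qformer_text_configs(
#       vision_config, qformer_config, text_config
#   )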
| 62 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImgaImgPipelineFastTests(
    PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableUnCLIPImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components(self ):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32 , size=32 )
        torch.manual_seed(0 )
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size , projection_dim=embedder_projection_dim , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
        # regular denoising components
        torch.manual_seed(0 )
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size )
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
        torch.manual_seed(0 )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=embedder_hidden_size , layers_per_block=1 , upcast_attention=True , use_linear_projection=True , )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear" , beta_start=0.00085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=False , steps_offset=1 , )
        torch.manual_seed(0 )
        vae = AutoencoderKL()
        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components
    def get_dummy_inputs(self , device , seed=0 , pil_image=True ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        input_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0 , 1 )
            input_image = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none(self ):
        device = "cpu" # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs.update({"image_embeds": None} )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def test_attention_slicing_forward_pass(self ):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )

    def test_inference_batch_single_identical(self ):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def test_xformers_attention_forwardGenerator_pass(self ):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False )
@slow
@require_torch_gpu
class StableUnCLIPImgaImgPipelineIntegrationTests(unittest.TestCase ):
    def tearDown(self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img(self ):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipe(input_image , "anime turtle" , generator=generator , output_type="np" )
        image = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(image , expected_image )

    def test_stable_unclip_h_img2img(self ):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipe(input_image , "anime turtle" , generator=generator , output_type="np" )
        image = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(image , expected_image )
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self ):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            input_image , "anime turtle" , num_inference_steps=2 , output_type="np" , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 63 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
def _read32(bytestream ):
    dt = numpy.dtype(numpy.uint32 ).newbyteorder('>' )
    return numpy.frombuffer(bytestream.read(4 ) , dtype=dt )[0]


@deprecated(None , 'Please use tf.data to implement this functionality.' )
def _extract_images(f ):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print('Extracting' , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _read32(bytestream )
        if magic != 20_51:
            raise ValueError(
                'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) )
        num_images = _read32(bytestream )
        rows = _read32(bytestream )
        cols = _read32(bytestream )
        buf = bytestream.read(rows * cols * num_images )
        data = numpy.frombuffer(buf , dtype=numpy.uint8 )
        data = data.reshape(num_images , rows , cols , 1 )
        return data
@deprecated(None , 'Please use tf.one_hot on tensors.' )
def _dense_to_one_hot(labels_dense , num_classes ):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels ) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes) )
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None , 'Please use tf.data to implement this functionality.' )
def _extract_labels(f , one_hot=False , num_classes=10 ):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print('Extracting' , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _read32(bytestream )
        if magic != 20_49:
            raise ValueError(
                'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) )
        num_items = _read32(bytestream )
        buf = bytestream.read(num_items )
        labels = numpy.frombuffer(buf , dtype=numpy.uint8 )
        if one_hot:
            return _dense_to_one_hot(labels , num_classes )
        return labels
class _DataSet:
    @deprecated(
        None , 'Please use alternatives such as official/mnist/_DataSet.py'
        ' from tensorflow/models.' , )
    def __init__( self , images , labels , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , seed=None , ):
        seed1 , seed2 = random_seed.get_seed(seed )
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2 )
        dtype = dtypes.as_dtype(dtype ).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype )
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f'images.shape: {images.shape} labels.shape: {labels.shape}'
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0] , images.shape[1] * images.shape[2] )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32 )
                images = numpy.multiply(images , 1.0 / 255.0 )
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self ):
        return self._images

    @property
    def labels(self ):
        return self._labels

    @property
    def num_examples(self ):
        return self._num_examples

    @property
    def epochs_completed(self ):
        return self._epochs_completed

    def next_batch(self , batch_size , fake_data=False , shuffle=True ):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size )],
                [fake_label for _ in range(batch_size )],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples )
            numpy.random.shuffle(perm0 )
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples )
                numpy.random.shuffle(perm )
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
                numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None , 'Please write your own downloading logic.' )
def _maybe_download(filename , work_directory , source_url ):
    """Download `filename` from `source_url` into `work_directory` unless it already exists."""
    if not gfile.Exists(work_directory ):
        gfile.MakeDirs(work_directory )
    filepath = os.path.join(work_directory , filename )
    if not gfile.Exists(filepath ):
        urllib.request.urlretrieve(source_url , filepath ) # noqa: S310
        with gfile.GFile(filepath ) as f:
            size = f.size()
        print('Successfully downloaded' , filename , size , 'bytes.' )
    return filepath
@deprecated(
    None , 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' )
def read_data_sets(train_dir , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , validation_size=50_00 , seed=None , source_url=DEFAULT_SOURCE_URL , ):
    if fake_data:
        def fake():
            return _DataSet(
                [] , [] , fake_data=True , one_hot=one_hot , dtype=dtype , seed=seed )
        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train , validation=validation , test=test )
    if not source_url: # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = 'train-images-idx3-ubyte.gz'
    train_labels_file = 'train-labels-idx1-ubyte.gz'
    test_images_file = 't10k-images-idx3-ubyte.gz'
    test_labels_file = 't10k-labels-idx1-ubyte.gz'
    local_file = _maybe_download(
        train_images_file , train_dir , source_url + train_images_file )
    with gfile.Open(local_file , 'rb' ) as f:
        train_images = _extract_images(f )
    local_file = _maybe_download(
        train_labels_file , train_dir , source_url + train_labels_file )
    with gfile.Open(local_file , 'rb' ) as f:
        train_labels = _extract_labels(f , one_hot=one_hot )
    local_file = _maybe_download(
        test_images_file , train_dir , source_url + test_images_file )
    with gfile.Open(local_file , 'rb' ) as f:
        test_images = _extract_images(f )
    local_file = _maybe_download(
        test_labels_file , train_dir , source_url + test_labels_file )
    with gfile.Open(local_file , 'rb' ) as f:
        test_labels = _extract_labels(f , one_hot=one_hot )
    if not 0 <= validation_size <= len(train_images ):
        msg = (
            'Validation size should be between 0 and '
            f'{len(train_images )}. Received: {validation_size}.'
        )
        raise ValueError(msg )
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {'dtype': dtype, 'reshape': reshape, 'seed': seed}
    train = _DataSet(train_images , train_labels , **options )
    validation = _DataSet(validation_images , validation_labels , **options )
    test = _DataSet(test_images , test_labels , **options )
    return _Datasets(train=train , validation=validation , test=test )
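# Usage sketch (editor's addition, not part of the original module). Downloads
# MNIST into ./mnist_data and draws one training batch; the directory name is
# an arbitrary choice:
#
#   data = read_data_sets("./mnist_data", one_hot=True)
#   images, labels = data.train.next_batch(32)  # images: (32, 784), labels: (32, 10)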
| 62 | 0 |
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum ):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer , last_epoch: int = -1 ):
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer , lambda current_step : 1 , last_epoch=last_epoch )


def get_constant_schedule_with_warmup(optimizer: Optimizer , num_warmup_steps: int , last_epoch: int = -1 ):
    """Constant learning rate preceded by a linear warmup over `num_warmup_steps`."""
    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0

    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def get_piecewise_constant_schedule(optimizer: Optimizer , step_rules: str , last_epoch: int = -1 ):
    """Piecewise-constant multipliers parsed from a rule string like "1:10,0.1:20,0.01"."""
    rules_dict = {}
    rule_list = step_rules.split(""",""" )
    for rule_str in rule_list[:-1]:
        value_str , value = rule_str.split(""":""" )
        steps = int(value_str )
        value = float(value )
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1] )

    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps: int ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
def get_linear_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ):
    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_schedule_with_warmup(optimizer: Optimizer , num_warmup_steps: int , num_training_steps: int , num_cycles: float = 0.5 , last_epoch: int = -1 ):
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )


def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer , num_warmup_steps: int , num_training_steps: int , num_cycles: int = 1 , last_epoch: int = -1 ):
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_polynomial_decay_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , lr_end=1e-7 , power=1.0 , last_epoch=-1 ):
    lr_init = optimizer.defaults["""lr"""]
    if not (lr_init > lr_end):
        raise ValueError(F"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})" )

    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer , lr_lambda , last_epoch )
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(name: Union[str, SchedulerType] , optimizer: Optimizer , step_rules: Optional[str] = None , num_warmup_steps: Optional[int] = None , num_training_steps: Optional[int] = None , num_cycles: int = 1 , power: float = 1.0 , last_epoch: int = -1 , ):
    """Unified API to get any scheduler from its name."""
    name = SchedulerType(name )
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer , last_epoch=last_epoch )
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(F"{name} requires `num_warmup_steps`, please provide that argument." )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(F"{name} requires `num_training_steps`, please provide that argument." )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )
    return schedule_func(
        optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch )
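# Usage sketch (editor's addition): wiring a warmup-then-linear-decay schedule
# into a training loop; `model` is assumed to be any torch.nn.Module.
#
#   import torch
#   optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
#   lr_scheduler = get_scheduler(
#       "linear", optimizer, num_warmup_steps=100, num_training_steps=1000
#   )
#   for step in range(1000):
#       ...  # forward/backward
#       optimizer.step()
#       lr_scheduler.step()
#       optimizer.zero_grad()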
| 64 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self ):
        super().setUp()
        vocab_tokens = [
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_tokenizer(self , **kwargs ):
        kwargs['lower_case'] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts(self , tokenizer ):
        input_text = '<unk> UNwanted , running'
        output_text = '<unk> unwanted, running'
        return input_text, output_text

    def test_full_tokenizer(self ):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=True )
        tokens = tokenizer.tokenize('<unk> UNwanted , running' )
        self.assertListEqual(tokens , ['<unk>', 'unwanted', ',', 'running'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [0, 4, 8, 7] )

    def test_full_tokenizer_lower(self ):
        tokenizer = TransfoXLTokenizer(lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )

    def test_full_tokenizer_no_lower(self ):
        tokenizer = TransfoXLTokenizer(lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )

    def test_full_tokenizer_moses(self ):
        tokenizer = TransfoXLTokenizer(lower_case=False )
        text_in = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
        tokens_out = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
self.assertListEqual(tokenizer.tokenize(A_ ) , A_ )
self.assertEqual(tokenizer.convert_tokens_to_string(A_ ) , A_ )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =len(A_ )
tokenizer.add_tokens(['new1', 'new2'] )
tokenizer.move_added_token('new1' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(A_ ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('new1' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , 'new1' )
| 62 | 0 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False, False, False
@dataclass
class A :
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : bool = True
__UpperCAmelCase : bool = True
__UpperCAmelCase : Optional[str] = None
# Automatically constructed
__UpperCAmelCase : ClassVar[str] = "dict"
__UpperCAmelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
__UpperCAmelCase : str = field(default='Audio' , init=UpperCAmelCase_ , repr=UpperCAmelCase_ )
def __call__(self : int ) -> Dict:
"""simple docstring"""
return self.pa_type
def lowercase_ (self : int , __UpperCAmelCase : Union[str, bytes, dict] ) -> dict:
"""simple docstring"""
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return {"bytes": None, "path": value}
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
UpperCAmelCase__ = BytesIO()
sf.write(__UpperCAmelCase , value["array"] , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# To convert raw PCM bytes to WAV bytes, the sampling rate must be known
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
# If the PCM bytes are already in memory, use them directly instead of re-reading the file
UpperCAmelCase__ = np.frombuffer(value["bytes"] , dtype=np.intaa ).astype(np.floataa ) / 3_2_7_6_7
else:
UpperCAmelCase__ = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.floataa ) / 3_2_7_6_7
UpperCAmelCase__ = BytesIO(bytes() )
sf.write(__UpperCAmelCase , __UpperCAmelCase , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def lowercase_ (self : int , __UpperCAmelCase : dict , __UpperCAmelCase : Optional[Dict[str, Union[str, bool, None]]] = None ) -> dict:
"""simple docstring"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
UpperCAmelCase__ , UpperCAmelCase__ = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
UpperCAmelCase__ = xsplitext(__UpperCAmelCase )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
UpperCAmelCase__ = token_per_repo_id or {}
UpperCAmelCase__ = path.split("::" )[-1]
try:
UpperCAmelCase__ = string_to_dict(__UpperCAmelCase , config.HUB_DATASETS_URL )["repo_id"]
UpperCAmelCase__ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
UpperCAmelCase__ = None
with xopen(__UpperCAmelCase , "rb" , use_auth_token=__UpperCAmelCase ) as f:
UpperCAmelCase__ , UpperCAmelCase__ = sf.read(__UpperCAmelCase )
else:
UpperCAmelCase__ , UpperCAmelCase__ = sf.read(__UpperCAmelCase )
UpperCAmelCase__ = array.T
if self.mono:
UpperCAmelCase__ = librosa.to_mono(__UpperCAmelCase )
if self.sampling_rate and self.sampling_rate != sampling_rate:
UpperCAmelCase__ = librosa.resample(__UpperCAmelCase , orig_sr=__UpperCAmelCase , target_sr=self.sampling_rate )
UpperCAmelCase__ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def lowercase_ (self : Optional[int] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def lowercase_ (self : List[Any] , __UpperCAmelCase : Union[pa.StringArray, pa.StructArray] ) -> pa.StructArray:
"""simple docstring"""
if pa.types.is_string(storage.type ):
UpperCAmelCase__ = pa.array([None] * len(__UpperCAmelCase ) , type=pa.binary() )
UpperCAmelCase__ = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase__ = pa.array([None] * len(__UpperCAmelCase ) , type=pa.string() )
UpperCAmelCase__ = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
UpperCAmelCase__ = pa.array([Audio().encode_example(__UpperCAmelCase ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCAmelCase__ = storage.field("bytes" )
else:
UpperCAmelCase__ = pa.array([None] * len(__UpperCAmelCase ) , type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCAmelCase__ = storage.field("path" )
else:
UpperCAmelCase__ = pa.array([None] * len(__UpperCAmelCase ) , type=pa.string() )
UpperCAmelCase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
return array_cast(__UpperCAmelCase , self.pa_type )
def lowercase_ (self : Dict , __UpperCAmelCase : pa.StructArray ) -> pa.StructArray:
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(__UpperCAmelCase : Any ):
with xopen(__UpperCAmelCase , "rb" ) as f:
UpperCAmelCase__ = f.read()
return bytes_
UpperCAmelCase__ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase__ = pa.array(
[os.path.basename(__UpperCAmelCase ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
UpperCAmelCase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(__UpperCAmelCase , self.pa_type )
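# Illustrative sketch of the PCM branch in encode_example above: 16-bit signed PCM
# samples are scaled into [-1.0, 1.0] floats by dividing by 32767. The buffer below
# is synthetic demo data, not a real recording.
_pcm_bytes = np.array([0, 16384, -16384, 32767], dtype=np.int16).tobytes()
_samples = np.frombuffer(_pcm_bytes, dtype=np.int16).astype(np.float32) / 32767
assert -1.0 <= _samples.min() and _samples.max() <= 1.0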
| 65 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_A = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
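# Minimal sketch of the lazy-import idiom these __init__ files rely on: attribute
# access triggers the real import, while the TYPE_CHECKING branch satisfies static
# checkers. The tiny class and the stdlib "json" mapping below are demo stand-ins.
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)

_demo_module = _TinyLazyModule("demo", {"json": ["dumps"]})
assert _demo_module.dumps({"a": 1}) == '{"a": 1}'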
| 62 | 0 |
"""simple docstring"""
from __future__ import annotations
__a = tuple[int, int, int]
__a = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
__a = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
__a = "EGZWVONAHDCLFQMSIPJBYUKXTR"
__a = "FOBHMDKEXQNRAULPGSJVTYICZW"
__a = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
__a = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
__a = "RMDJXFUWGISLHVTCQNKYPBEZOA"
__a = "SGLCPQWZHKXAREONTFBVIYJUDM"
__a = "HVSICLTYKQUBXDWAJZOMFGPREN"
__a = "RZWQHFMVDBKICJLNTUXAGYPSOE"
__a = "LFKIJODBEGAMQPXVUHYSTCZRWN"
__a = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def A_ ( _lowercase, _lowercase, _lowercase ):
'''simple docstring'''
if (unique_rotsel := len(set(_lowercase ) )) < 3:
snake_case_ :Any = f"""Please use 3 unique rotors (not {unique_rotsel})"""
raise Exception(_lowercase )
# Checks if rotor positions are valid
snake_case_, snake_case_, snake_case_ :int = rotpos
if not 0 < rotorposa <= len(_lowercase ):
snake_case_ :List[Any] = f"""First rotor position is not within range of 1..26 ({rotorposa}"""
raise ValueError(_lowercase )
if not 0 < rotorposa <= len(_lowercase ):
snake_case_ :Tuple = f"""Second rotor position is not within range of 1..26 ({rotorposa})"""
raise ValueError(_lowercase )
if not 0 < rotorposa <= len(_lowercase ):
snake_case_ :str = f"""Third rotor position is not within range of 1..26 ({rotorposa})"""
raise ValueError(_lowercase )
# Validates string and returns dict
snake_case_ :Optional[Any] = _plugboard(_lowercase )
return rotpos, rotsel, pbdict
def A_ ( _lowercase ):
'''simple docstring'''
if not isinstance(_lowercase, _lowercase ):
snake_case_ :int = f"""Plugboard setting isn't type string ({type(_lowercase )})"""
raise TypeError(_lowercase )
elif len(_lowercase ) % 2 != 0:
snake_case_ :List[Any] = f"""Odd number of symbols ({len(_lowercase )})"""
raise Exception(_lowercase )
elif pbstring == "":
return {}
pbstring = pbstring.replace(""" """, """""" )  # str.replace returns a new string; rebind to actually strip spaces
# Checks if all characters are unique
snake_case_ :List[str] = set()
for i in pbstring:
if i not in abc:
snake_case_ :Dict = f"""'{i}' not in list of symbols"""
raise Exception(_lowercase )
elif i in tmppbl:
snake_case_ :Dict = f"""Duplicate symbol ({i})"""
raise Exception(_lowercase )
else:
tmppbl.add(_lowercase )
del tmppbl
# Created the dictionary
snake_case_ :int = {}
for j in range(0, len(_lowercase ) - 1, 2 ):
snake_case_ :Dict = pbstring[j + 1]
snake_case_ :List[Any] = pbstring[j]
return pb
def A_ ( _lowercase, _lowercase, _lowercase = (rotora, rotora, rotora), _lowercase = "", ):
'''simple docstring'''
snake_case_ :Tuple = text.upper()
snake_case_, snake_case_, snake_case_ :Tuple = _validator(
_lowercase, _lowercase, plugb.upper() )
snake_case_, snake_case_, snake_case_ :int = rotor_position
snake_case_, snake_case_, snake_case_ :Tuple = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
snake_case_ :int = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
snake_case_ :Any = plugboard[symbol]
# rotor ra --------------------------
snake_case_ :Optional[int] = abc.index(_lowercase ) + rotorposa
snake_case_ :Any = rotora[index % len(_lowercase )]
# rotor rb --------------------------
snake_case_ :List[Any] = abc.index(_lowercase ) + rotorposa
snake_case_ :int = rotora[index % len(_lowercase )]
# rotor rc --------------------------
snake_case_ :int = abc.index(_lowercase ) + rotorposa
snake_case_ :List[Any] = rotora[index % len(_lowercase )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
snake_case_ :Union[str, Any] = reflector[symbol]
# 2nd rotors
snake_case_ :int = abc[rotora.index(_lowercase ) - rotorposa]
snake_case_ :Dict = abc[rotora.index(_lowercase ) - rotorposa]
snake_case_ :Union[str, Any] = abc[rotora.index(_lowercase ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
snake_case_ :int = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(_lowercase ):
snake_case_ :List[Any] = 0
rotorposa += 1
if rotorposa >= len(_lowercase ):
snake_case_ :str = 0
rotorposa += 1
if rotorposa >= len(_lowercase ):
snake_case_ :Union[str, Any] = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(_lowercase )
return "".join(_lowercase )
if __name__ == "__main__":
__a = "This is my Python script that emulates the Enigma machine from WWII."
__a = (1, 1, 1)
__a = "pictures"
__a = (rotora, rotora, rotora)
__a = enigma(message, rotor_pos, rotor_sel, pb)
print("Encrypted message:", en)
print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 66 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
_A = logging.getLogger(__name__)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
__UpperCamelCase =np.argmax(SCREAMING_SNAKE_CASE__ , axis=1 )
return np.sum(outputs == labels )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ):
with open(SCREAMING_SNAKE_CASE__ , encoding='utf_8' ) as f:
__UpperCamelCase =csv.reader(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[]
next(SCREAMING_SNAKE_CASE__ ) # skip the first line
for line in tqdm(SCREAMING_SNAKE_CASE__ ):
output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict ):
__UpperCamelCase =[]
for dataset in encoded_datasets:
__UpperCamelCase =len(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
__UpperCamelCase =np.zeros((n_batch, 2) , dtype=np.intaa )
__UpperCamelCase =np.full((n_batch, 2, input_len) , fill_value=-1_00 , dtype=np.intaa )
__UpperCamelCase =np.zeros((n_batch,) , dtype=np.intaa )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =[start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__UpperCamelCase =[start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__UpperCamelCase =with_conta
__UpperCamelCase =with_conta
__UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) - 1
__UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) - 1
__UpperCamelCase =with_conta
__UpperCamelCase =with_conta
__UpperCamelCase =mc_label
__UpperCamelCase =(input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(SCREAMING_SNAKE_CASE__ ) for t in all_inputs ) )
return tensor_datasets
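# Illustrative sketch of the packing performed above: each story is paired with two
# candidate endings, wrapped as [start] story [delimiter] ending [clf], and the
# mc_token_ids point at the [clf] position per choice. Token ids below are made up.
_start, _delim, _clf = 9001, 9002, 9003
_story, _ending_a, _ending_b = [5, 6, 7], [8, 9], [10, 11, 12]
_packed = [
    [_start] + _story + [_delim] + _ending_a + [_clf],
    [_start] + _story + [_delim] + _ending_b + [_clf],
]
_mc_token_ids = [len(seq) - 1 for seq in _packed]
assert _mc_token_ids == [7, 8]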
def _UpperCAmelCase ( ):
__UpperCamelCase =argparse.ArgumentParser()
parser.add_argument('--model_name' , type=SCREAMING_SNAKE_CASE__ , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=SCREAMING_SNAKE_CASE__ , default='' )
parser.add_argument('--eval_dataset' , type=SCREAMING_SNAKE_CASE__ , default='' )
parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE__ , default=42 )
parser.add_argument('--num_train_epochs' , type=SCREAMING_SNAKE_CASE__ , default=3 )
parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=8 )
parser.add_argument('--eval_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=16 )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=SCREAMING_SNAKE_CASE__ , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=SCREAMING_SNAKE_CASE__ , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=SCREAMING_SNAKE_CASE__ , help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=SCREAMING_SNAKE_CASE__ , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE__ , default=6.25E-5 )
parser.add_argument('--warmup_steps' , default=0 , type=SCREAMING_SNAKE_CASE__ , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=SCREAMING_SNAKE_CASE__ , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=SCREAMING_SNAKE_CASE__ , default=0.01 )
parser.add_argument('--lm_coef' , type=SCREAMING_SNAKE_CASE__ , default=0.9 )
parser.add_argument('--n_valid' , type=SCREAMING_SNAKE_CASE__ , default=3_74 )
parser.add_argument('--server_ip' , type=SCREAMING_SNAKE_CASE__ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=SCREAMING_SNAKE_CASE__ , default='' , help='Can be used for distant debugging.' )
__UpperCamelCase =parser.parse_args()
print(SCREAMING_SNAKE_CASE__ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE__ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
__UpperCamelCase =torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
__UpperCamelCase =torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading function also adds new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__UpperCamelCase =['_start_', '_delimiter_', '_classify_']
__UpperCamelCase =OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE__ ) )
model.to(SCREAMING_SNAKE_CASE__ )
# Load and encode the datasets
def tokenize_and_encode(SCREAMING_SNAKE_CASE__ : str ):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return obj
return [tokenize_and_encode(SCREAMING_SNAKE_CASE__ ) for o in obj]
logger.info('Encoding dataset...' )
__UpperCamelCase =load_rocstories_dataset(args.train_dataset )
__UpperCamelCase =load_rocstories_dataset(args.eval_dataset )
__UpperCamelCase =(train_dataset, eval_dataset)
__UpperCamelCase =tokenize_and_encode(SCREAMING_SNAKE_CASE__ )
# Compute the max input length for the Transformer
__UpperCamelCase =model.config.n_positions // 2 - 2
__UpperCamelCase =max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
__UpperCamelCase =min(SCREAMING_SNAKE_CASE__ , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__UpperCamelCase =pre_process_datasets(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ )
__UpperCamelCase , __UpperCamelCase =tensor_datasets[0], tensor_datasets[1]
__UpperCamelCase =TensorDataset(*SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =RandomSampler(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =DataLoader(SCREAMING_SNAKE_CASE__ , sampler=SCREAMING_SNAKE_CASE__ , batch_size=args.train_batch_size )
__UpperCamelCase =TensorDataset(*SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =SequentialSampler(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =DataLoader(SCREAMING_SNAKE_CASE__ , sampler=SCREAMING_SNAKE_CASE__ , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__UpperCamelCase =args.max_steps
__UpperCamelCase =args.max_steps // (len(SCREAMING_SNAKE_CASE__ ) // args.gradient_accumulation_steps) + 1
else:
__UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) // args.gradient_accumulation_steps * args.num_train_epochs
__UpperCamelCase =list(model.named_parameters() )
__UpperCamelCase =['bias', 'LayerNorm.bias', 'LayerNorm.weight']
__UpperCamelCase =[
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
__UpperCamelCase =AdamW(SCREAMING_SNAKE_CASE__ , lr=args.learning_rate , eps=args.adam_epsilon )
__UpperCamelCase =get_linear_schedule_with_warmup(
SCREAMING_SNAKE_CASE__ , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE__ )
if args.do_train:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
__UpperCamelCase =0
__UpperCamelCase =0
__UpperCamelCase =tqdm(SCREAMING_SNAKE_CASE__ , desc='Training' )
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =tuple(t.to(SCREAMING_SNAKE_CASE__ ) for t in batch )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =batch
__UpperCamelCase =model(SCREAMING_SNAKE_CASE__ , mc_token_ids=SCREAMING_SNAKE_CASE__ , lm_labels=SCREAMING_SNAKE_CASE__ , mc_labels=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__UpperCamelCase =(
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__UpperCamelCase ='Training loss: {:.2e} lr: {:.2e}'.format(SCREAMING_SNAKE_CASE__ , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__UpperCamelCase =model.module if hasattr(SCREAMING_SNAKE_CASE__ , 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__UpperCamelCase =os.path.join(args.output_dir , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =os.path.join(args.output_dir , SCREAMING_SNAKE_CASE__ )
torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE__ )
model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE__ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
__UpperCamelCase =OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
__UpperCamelCase =OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(SCREAMING_SNAKE_CASE__ )
if args.do_eval:
model.eval()
__UpperCamelCase , __UpperCamelCase =0, 0
__UpperCamelCase , __UpperCamelCase =0, 0
for batch in tqdm(SCREAMING_SNAKE_CASE__ , desc='Evaluating' ):
__UpperCamelCase =tuple(t.to(SCREAMING_SNAKE_CASE__ ) for t in batch )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =batch
with torch.no_grad():
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =model(
SCREAMING_SNAKE_CASE__ , mc_token_ids=SCREAMING_SNAKE_CASE__ , lm_labels=SCREAMING_SNAKE_CASE__ , mc_labels=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =mc_logits.detach().cpu().numpy()
__UpperCamelCase =mc_labels.to('cpu' ).numpy()
__UpperCamelCase =accuracy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
__UpperCamelCase =eval_loss / nb_eval_steps
__UpperCamelCase =eval_accuracy / nb_eval_examples
__UpperCamelCase =tr_loss / nb_tr_steps if args.do_train else None
__UpperCamelCase ={'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
__UpperCamelCase =os.path.join(args.output_dir , 'eval_results.txt' )
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , SCREAMING_SNAKE_CASE__ , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 62 | 0 |
'''simple docstring'''
from __future__ import annotations
__UpperCAmelCase =[
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> tuple[list[list[int]], list[list[int]]]:
__lowerCamelCase = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCamelCase__ ) )
] # the reference grid
__lowerCamelCase = 1
__lowerCamelCase = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCamelCase__ ) )
] # the action grid
__lowerCamelCase = init[0]
__lowerCamelCase = init[1]
__lowerCamelCase = 0
__lowerCamelCase = g + heuristic[x][y] # f = g + h: cost so far plus heuristic estimate to the goal
__lowerCamelCase = [[f, g, x, y]]
__lowerCamelCase = False # flag that is set when search is complete
__lowerCamelCase = False # flag set if we can't find expand
while not found and not resign:
if len(UpperCamelCase__ ) == 0:
raise ValueError('''Algorithm is unable to find solution''' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
__lowerCamelCase = cell.pop()
__lowerCamelCase = next_cell[2]
__lowerCamelCase = next_cell[3]
__lowerCamelCase = next_cell[1]
if x == goal[0] and y == goal[1]:
__lowerCamelCase = True
else:
for i in range(len(UpperCamelCase__ ) ): # to try out different valid actions
__lowerCamelCase = x + DIRECTIONS[i][0]
__lowerCamelCase = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(UpperCamelCase__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__lowerCamelCase = g + cost
__lowerCamelCase = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__lowerCamelCase = 1
__lowerCamelCase = i
__lowerCamelCase = []
__lowerCamelCase = goal[0]
__lowerCamelCase = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__lowerCamelCase = x - DIRECTIONS[action[x][y]][0]
__lowerCamelCase = y - DIRECTIONS[action[x][y]][1]
__lowerCamelCase = xa
__lowerCamelCase = ya
invpath.append([x, y] )
__lowerCamelCase = []
for i in range(len(UpperCamelCase__ ) ):
path.append(invpath[len(UpperCamelCase__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
__UpperCAmelCase =[
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__UpperCAmelCase =[0, 0]
# all coordinates are given in format [y,x]
__UpperCAmelCase =[len(grid) - 1, len(grid[0]) - 1]
__UpperCAmelCase =1
# the cost map which pushes the path closer to the goal
__UpperCAmelCase =[[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__UpperCAmelCase =abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__UpperCAmelCase =9_9
__UpperCAmelCase , __UpperCAmelCase =search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 67 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int = 10**12 ):
__UpperCamelCase =1
__UpperCamelCase =0
__UpperCamelCase =1
__UpperCamelCase =1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(f"""{solution() = }""")
| 62 | 0 |
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: int ) -> list[str]:
'''simple docstring'''
return [sentence[i : i + ngram_size] for i in range(len(SCREAMING_SNAKE_CASE_ ) - ngram_size + 1 )]
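# Self-contained restatement of the sliding-window n-gram idea above with readable
# names, since it is easier to sanity-check in isolation:
def _demo_ngrams(sentence: str, ngram_size: int) -> list[str]:
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]

assert _demo_ngrams("abcd", 2) == ["ab", "bc", "cd"]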
if __name__ == "__main__":
from doctest import testmod
testmod()
| 68 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_A = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 69 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
A__ : Dict =logging.getLogger(__name__)
@dataclass(frozen=snake_case_ )
class UpperCAmelCase :
_lowercase: str
_lowercase: str
_lowercase: Optional[str] = None
_lowercase: Optional[str] = None
_lowercase: Optional[str] = None
@dataclass(frozen=snake_case_ )
class UpperCAmelCase :
_lowercase: List[int]
_lowercase: Optional[List[int]] = None
_lowercase: Optional[List[int]] = None
_lowercase: Optional[Union[int, float]] = None
_lowercase: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class UpperCAmelCase ( snake_case_ ):
_lowercase: List[InputFeatures]
def __init__( self : str , __snake_case : str , __snake_case : PreTrainedTokenizer , __snake_case : str , __snake_case : Optional[int] = None , __snake_case : int=False , __snake_case : bool = False , ) -> Optional[int]:
_lowerCAmelCase = hans_processors[task]()
_lowerCAmelCase = os.path.join(
__snake_case , """cached_{}_{}_{}_{}""".format(
"""dev""" if evaluate else """train""" , tokenizer.__class__.__name__ , str(__snake_case ) , __snake_case , ) , )
_lowerCAmelCase = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_lowerCAmelCase , _lowerCAmelCase = label_list[2], label_list[1]
_lowerCAmelCase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCAmelCase = cached_features_file + """.lock"""
with FileLock(__snake_case ):
if os.path.exists(__snake_case ) and not overwrite_cache:
logger.info(f"Loading features from cached file {cached_features_file}" )
_lowerCAmelCase = torch.load(__snake_case )
else:
logger.info(f"Creating features from dataset file at {data_dir}" )
_lowerCAmelCase = (
processor.get_dev_examples(__snake_case ) if evaluate else processor.get_train_examples(__snake_case )
)
logger.info("""Training examples: %s""" , len(__snake_case ) )
_lowerCAmelCase = hans_convert_examples_to_features(__snake_case , __snake_case , __snake_case , __snake_case )
logger.info("""Saving features into cached file %s""" , __snake_case )
torch.save(self.features , __snake_case )
def __len__( self : List[str] ) -> List[Any]:
return len(self.features )
def __getitem__( self : Union[str, Any] , __snake_case : Union[str, Any] ) -> InputFeatures:
return self.features[i]
def lowercase__ ( self : List[Any] ) -> int:
return self.label_list
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase :
_lowercase: List[InputFeatures]
def __init__( self : List[Any] , __snake_case : str , __snake_case : PreTrainedTokenizer , __snake_case : str , __snake_case : Optional[int] = 1_28 , __snake_case : Dict=False , __snake_case : bool = False , ) -> Union[str, Any]:
_lowerCAmelCase = hans_processors[task]()
_lowerCAmelCase = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_lowerCAmelCase , _lowerCAmelCase = label_list[2], label_list[1]
_lowerCAmelCase = label_list
_lowerCAmelCase = processor.get_dev_examples(__snake_case ) if evaluate else processor.get_train_examples(__snake_case )
_lowerCAmelCase = hans_convert_examples_to_features(__snake_case , __snake_case , __snake_case , __snake_case )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="""convert examples to features""" ):
if ex_index % 1_00_00 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(__snake_case )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
_lowerCAmelCase = tf.data.Dataset.from_generator(
__snake_case , (
{
"""example_id""": tf.intaa,
"""input_ids""": tf.intaa,
"""attention_mask""": tf.intaa,
"""token_type_ids""": tf.intaa,
},
tf.intaa,
) , (
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
return self.dataset
def __len__( self : Any ) -> List[Any]:
return len(self.features )
def __getitem__( self : Any , __snake_case : Optional[Any] ) -> InputFeatures:
return self.features[i]
def lowercase__ ( self : Dict ) -> str:
return self.label_list
class UpperCAmelCase ( snake_case_ ):
def lowercase__ ( self : Optional[Any] , __snake_case : str ) -> int:
return self._create_examples(self._read_tsv(os.path.join(__snake_case , """heuristics_train_set.txt""" ) ) , """train""" )
def lowercase__ ( self : str , __snake_case : List[Any] ) -> Optional[int]:
return self._create_examples(self._read_tsv(os.path.join(__snake_case , """heuristics_evaluation_set.txt""" ) ) , """dev""" )
def lowercase__ ( self : int ) -> Optional[int]:
return ["contradiction", "entailment", "neutral"]
def lowercase__ ( self : List[Any] , __snake_case : Any , __snake_case : Optional[Any] ) -> int:
_lowerCAmelCase = []
for i, line in enumerate(__snake_case ):
if i == 0:
continue
_lowerCAmelCase = """%s-%s""" % (set_type, line[0])
_lowerCAmelCase = line[5]
_lowerCAmelCase = line[6]
_lowerCAmelCase = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
_lowerCAmelCase = line[0]
examples.append(InputExample(guid=__snake_case , text_a=__snake_case , text_b=__snake_case , label=__snake_case , pairID=__snake_case ) )
return examples
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
"""simple docstring"""
_lowerCAmelCase = {label: i for i, label in enumerate(lowerCAmelCase )}
_lowerCAmelCase = []
for ex_index, example in tqdm.tqdm(enumerate(lowerCAmelCase ) , desc="""convert examples to features""" ):
if ex_index % 1_00_00 == 0:
logger.info("""Writing example %d""" % (ex_index) )
_lowerCAmelCase = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowerCAmelCase , max_length=lowerCAmelCase , padding="""max_length""" , truncation=lowerCAmelCase , return_overflowing_tokens=lowerCAmelCase , )
_lowerCAmelCase = label_map[example.label] if example.label in label_map else 0
_lowerCAmelCase = int(example.pairID )
features.append(InputFeatures(**lowerCAmelCase , label=lowerCAmelCase , pairID=lowerCAmelCase ) )
for i, example in enumerate(examples[:5] ):
logger.info("""*** Example ***""" )
logger.info(f"guid: {example}" )
logger.info(f"features: {features[i]}" )
return features
A__ : List[Any] ={
'''hans''': 3,
}
A__ : Any ={
'''hans''': HansProcessor,
}
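# Minimal sketch of the lock-guarded feature cache used in the Dataset above: only
# one process builds the cache while the rest block on the lock and then load it.
# The helper name and path handling are illustrative, not the exact scheme above.
def _load_or_build_features(cache_path, build_fn):
    import os
    import torch
    from filelock import FileLock

    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path):
            return torch.load(cache_path)
        features = build_fn()
        torch.save(features, cache_path)
        return features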
| 70 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_A = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 0 |
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def A ( a_ ,a_ ,a_ ) -> Tuple:
# Construct model
if openai_config_file == "":
__UpperCamelCase : int =OpenAIGPTConfig()
else:
__UpperCamelCase : Any =OpenAIGPTConfig.from_json_file(a_ )
__UpperCamelCase : Any =OpenAIGPTModel(a_ )
# Load weights from numpy
load_tf_weights_in_openai_gpt(a_ ,a_ ,a_ )
# Save pytorch-model
__UpperCamelCase : Optional[Any] =pytorch_dump_folder_path + '/' + WEIGHTS_NAME
__UpperCamelCase : Optional[Any] =pytorch_dump_folder_path + '/' + CONFIG_NAME
print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(model.state_dict() ,a_ )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(a_ ,'w' ,encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A_ :int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--openai_checkpoint_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the TensorFlow checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--openai_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
A_ :Optional[int] = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 71 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def _a ( self , A_ ) -> float:
return 0.0
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
__UpperCamelCase =max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : FilterType , SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =5_12
__UpperCamelCase =[1] + [0] * (size - 1)
__UpperCamelCase =[filter_type.process(SCREAMING_SNAKE_CASE__ ) for item in inputs]
__UpperCamelCase =[0] * (samplerate - size) # zero-padding
outputs += filler
__UpperCamelCase =np.abs(np.fft.fft(SCREAMING_SNAKE_CASE__ ) )
__UpperCamelCase =20 * np.logaa(SCREAMING_SNAKE_CASE__ )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('Frequency (Hz)' )
plt.xscale('log' )
# Display within reasonable bounds
__UpperCamelCase =get_bounds(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel('Gain (dB)' )
plt.plot(SCREAMING_SNAKE_CASE__ )
plt.show()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : FilterType , SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =5_12
__UpperCamelCase =[1] + [0] * (size - 1)
__UpperCamelCase =[filter_type.process(item ) for item in inputs]  # run the impulse through the filter sample by sample
__UpperCamelCase =[0] * (samplerate - size) # zero-padding
outputs += filler
__UpperCamelCase =np.angle(np.fft.fft(SCREAMING_SNAKE_CASE__ ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('Frequency (Hz)' )
plt.xscale('log' )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel('Phase shift (Radians)' )
plt.plot(np.unwrap(SCREAMING_SNAKE_CASE__ , -2 * pi ) )
plt.show()
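# Illustrative usage sketch: the simplest object satisfying the process() protocol
# above is a pass-through filter, which should plot as flat 0 dB gain and zero
# phase shift when fed to either helper with a sample rate such as 48000.
class IdentityFilter:
    def process(self, sample: float) -> float:
        return sample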
| 62 | 0 |
"""simple docstring"""
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
lowerCAmelCase__ = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
lowerCAmelCase__ = {
'''facebook/blenderbot_small-90M''': 512,
}
class __snake_case ( _lowercase):
snake_case__ : List[str] = VOCAB_FILES_NAMES
snake_case__ : int = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : List[Any] = BlenderbotSmallTokenizer
def __init__( self : List[str] , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Any=None , __lowerCAmelCase : str="<|endoftext|>" , __lowerCAmelCase : List[Any]="<|endoftext|>" , __lowerCAmelCase : Optional[Any]="<|endoftext|>" , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : str=True , **__lowerCAmelCase : List[str] , ):
"""simple docstring"""
super().__init__(
ByteLevelBPETokenizer(
vocab=__lowerCAmelCase , merges=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase , ) , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , **__lowerCAmelCase , )
_lowerCamelCase : List[Any] = add_prefix_space
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Any , __lowerCAmelCase : List[str]=None ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
_lowerCamelCase : str = [self.sep_token_id]
_lowerCamelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 72 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a ={"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
a =_LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 73 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = "mvp"
UpperCAmelCase__ : Tuple = ["past_key_values"]
UpperCAmelCase__ : Union[str, Any] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , A_=50267 , A_=1024 , A_=12 , A_=4096 , A_=16 , A_=12 , A_=4096 , A_=16 , A_=0.0 , A_=0.0 , A_="gelu" , A_=1024 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.02 , A_=0.0 , A_=False , A_=True , A_=1 , A_=0 , A_=2 , A_=True , A_=2 , A_=2 , A_=False , A_=100 , A_=800 , **A_ , ) -> Union[str, Any]:
__UpperCamelCase =vocab_size
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =d_model
__UpperCamelCase =encoder_ffn_dim
__UpperCamelCase =encoder_layers
__UpperCamelCase =encoder_attention_heads
__UpperCamelCase =decoder_ffn_dim
__UpperCamelCase =decoder_layers
__UpperCamelCase =decoder_attention_heads
__UpperCamelCase =dropout
__UpperCamelCase =attention_dropout
__UpperCamelCase =activation_dropout
__UpperCamelCase =activation_function
__UpperCamelCase =init_std
__UpperCamelCase =encoder_layerdrop
__UpperCamelCase =decoder_layerdrop
__UpperCamelCase =classifier_dropout
__UpperCamelCase =use_cache
__UpperCamelCase =encoder_layers
__UpperCamelCase =scale_embedding # scale factor will be sqrt(d_model) if True
__UpperCamelCase =use_prompt
__UpperCamelCase =prompt_length
__UpperCamelCase =prompt_mid_dim
super().__init__(
pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , is_encoder_decoder=A_ , decoder_start_token_id=A_ , forced_eos_token_id=A_ , **A_ , )
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , A_ ):
__UpperCamelCase =self.bos_token_id
warnings.warn(
f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
'The config can simply be saved and uploaded again to be fixed.' )
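# Standalone sketch of the deprecation fallback at the end of __init__ above
# (the helper and its arguments are illustrative stand-ins for the config fields):
def _resolve_forced_bos(forced_bos_token_id, bos_token_id, legacy_kwargs):
    if forced_bos_token_id is None and legacy_kwargs.get('force_bos_token_to_be_generated', False):
        warnings.warn('Set `forced_bos_token_id` in the config instead of the legacy flag.')
        return bos_token_id
    return forced_bos_token_id

assert _resolve_forced_bos(None, 0, {'force_bos_token_to_be_generated': True}) == 0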
| 62 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: int = ['''torch''']
def __init__( self : Union[str, Any] ,*A_ : Tuple ,**A_ : Optional[int] ) -> Any:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : Optional[int] ,**A_ : Tuple ) -> Optional[int]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : List[Any] ,**A_ : Dict ) -> int:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = ['''torch''']
def __init__( self : Optional[int] ,*A_ : Optional[Any] ,**A_ : Union[str, Any] ) -> Optional[int]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : List[Any] ,**A_ : List[str] ) -> List[str]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : Union[str, Any] ,**A_ : Dict ) -> List[Any]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = ['''torch''']
def __init__( self : Any ,*A_ : List[Any] ,**A_ : Dict ) -> Any:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : Dict ,**A_ : Tuple ) -> List[str]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : str ,**A_ : List[str] ) -> List[Any]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = ['''torch''']
def __init__( self : Any ,*A_ : Union[str, Any] ,**A_ : List[Any] ) -> Optional[Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : Union[str, Any] ,**A_ : Union[str, Any] ) -> List[Any]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : List[str] ,**A_ : List[Any] ) -> Tuple:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Tuple = ['''torch''']
def __init__( self : Optional[int] ,*A_ : str ,**A_ : List[str] ) -> List[str]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : Optional[Any] ,**A_ : List[Any] ) -> List[Any]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : int ,**A_ : List[Any] ) -> Tuple:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = ['''torch''']
def __init__( self : str ,*A_ : Optional[Any] ,**A_ : Optional[int] ) -> List[Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : List[str] ,**A_ : Optional[int] ) -> Optional[Any]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : int ,**A_ : int ) -> str:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = ['''torch''']
def __init__( self : List[str] ,*A_ : Union[str, Any] ,**A_ : Optional[int] ) -> int:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : List[Any] ,**A_ : str ) -> int:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Union[str, Any] ,**A_ : Union[str, Any] ) -> Tuple:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = ['''torch''']
def __init__( self : List[Any] ,*A_ : int ,**A_ : int ) -> Optional[Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : int ,**A_ : Tuple ) -> List[str]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : Optional[Any] ,**A_ : Tuple ) -> Optional[Any]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Tuple = ['''torch''']
def __init__( self : List[str] ,*A_ : int ,**A_ : str ) -> Union[str, Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : List[Any] ,**A_ : Optional[int] ) -> Any:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,*A_ : Tuple ,**A_ : Any ) -> Any:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = ['''torch''']
def __init__( self : Any ,*A_ : Optional[Any] ,**A_ : List[Any] ) -> Optional[Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : str ,**A_ : List[str] ) -> Dict:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : int ,**A_ : Any ) -> int:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = ['''torch''']
def __init__( self : Tuple ,*A_ : Tuple ,**A_ : Tuple ) -> Any:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Union[str, Any] ,**A_ : Tuple ) -> Tuple:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : Union[str, Any] ,**A_ : Dict ) -> str:
requires_backends(cls ,['torch'] )
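
# NOTE: the placeholder functions below (like the dummy classes above) appear to
# have been distinct names in the original file; the obfuscation collapsed them
# onto a single identifier, so each definition simply shadows the previous one.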
def _snake_case ( *snake_case__ : Any , **snake_case__ : Dict ):
requires_backends(snake_case__ , ['torch'] )
def _snake_case ( *snake_case__ : Optional[int] , **snake_case__ : List[Any] ):
requires_backends(snake_case__ , ['torch'] )
def _snake_case ( *snake_case__ : str , **snake_case__ : str ):
requires_backends(snake_case__ , ['torch'] )
def _snake_case ( *snake_case__ : int , **snake_case__ : List[Any] ):
requires_backends(snake_case__ , ['torch'] )
def _snake_case ( *snake_case__ : List[str] , **snake_case__ : Any ):
requires_backends(snake_case__ , ['torch'] )
def _snake_case ( *snake_case__ : Any , **snake_case__ : Any ):
requires_backends(snake_case__ , ['torch'] )
def _snake_case ( *snake_case__ : int , **snake_case__ : Tuple ):
requires_backends(snake_case__ , ['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = ['''torch''']
def __init__( self : Optional[Any] ,*A_ : Optional[int] ,**A_ : Dict ) -> Dict:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : Any ,**A_ : List[Any] ) -> Any:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : Optional[int] ,**A_ : Union[str, Any] ) -> List[str]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = ['''torch''']
def __init__( self : Tuple ,*A_ : List[str] ,**A_ : List[Any] ) -> Optional[Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : Optional[int] ,**A_ : Union[str, Any] ) -> List[Any]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : Tuple ,**A_ : Union[str, Any] ) -> Tuple:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Dict = ['''torch''']
def __init__( self : str ,*A_ : Union[str, Any] ,**A_ : Any ) -> Tuple:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : Optional[Any] ,**A_ : List[Any] ) -> Any:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : Optional[int] ,**A_ : List[Any] ) -> Union[str, Any]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: int = ['''torch''']
def __init__( self : Optional[Any] ,*A_ : Any ,**A_ : Any ) -> Tuple:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : List[str] ,**A_ : Dict ) -> Union[str, Any]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : List[str] ,**A_ : Optional[int] ) -> Dict:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = ['''torch''']
def __init__( self : Optional[int] ,*A_ : Any ,**A_ : List[str] ) -> Union[str, Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : Any ,**A_ : List[str] ) -> Dict:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : int ,**A_ : Dict ) -> List[str]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = ['''torch''']
def __init__( self : List[str] ,*A_ : Dict ,**A_ : Optional[int] ) -> Dict:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : Dict ,**A_ : Any ) -> int:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : Optional[Any] ,**A_ : Tuple ) -> int:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: int = ['''torch''']
def __init__( self : List[str] ,*A_ : List[str] ,**A_ : int ) -> Any:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : Tuple ,**A_ : Optional[int] ) -> Any:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : List[Any] ,**A_ : Any ) -> Any:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = ['''torch''']
def __init__( self : Optional[Any] ,*A_ : str ,**A_ : Optional[int] ) -> Dict:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : List[str] ,**A_ : Optional[Any] ) -> Optional[Any]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : List[str] ,**A_ : Tuple ) -> Union[str, Any]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: str = ['''torch''']
def __init__( self : str ,*A_ : Union[str, Any] ,**A_ : Dict ) -> Any:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : List[Any] ,**A_ : Any ) -> Optional[Any]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : List[str] ,**A_ : Optional[Any] ) -> int:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = ['''torch''']
def __init__( self : int ,*A_ : int ,**A_ : Union[str, Any] ) -> str:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : int ,**A_ : List[Any] ) -> Tuple:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : Union[str, Any] ,**A_ : Any ) -> Dict:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Tuple = ['''torch''']
def __init__( self : str ,*A_ : Optional[Any] ,**A_ : List[str] ) -> Any:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : Optional[Any] ,**A_ : str ) -> Any:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : int ,**A_ : Tuple ) -> Tuple:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: str = ['''torch''']
def __init__( self : List[Any] ,*A_ : List[str] ,**A_ : List[str] ) -> int:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : Dict ,**A_ : int ) -> List[str]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : Tuple ,**A_ : Optional[Any] ) -> Optional[Any]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: int = ['''torch''']
def __init__( self : Any ,*A_ : List[str] ,**A_ : Any ) -> Tuple:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,*A_ : Tuple ,**A_ : List[Any] ) -> Dict:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : List[str] ,**A_ : Optional[Any] ) -> List[Any]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Dict = ['''torch''']
def __init__( self : Tuple ,*A_ : List[str] ,**A_ : Optional[Any] ) -> Union[str, Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : Any ,**A_ : Optional[int] ) -> List[Any]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : Dict ,**A_ : Optional[int] ) -> str:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = ['''torch''']
def __init__( self : Any ,*A_ : Optional[int] ,**A_ : List[Any] ) -> str:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : List[Any] ,**A_ : List[str] ) -> Tuple:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : Union[str, Any] ,**A_ : Any ) -> Optional[int]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = ['''torch''']
def __init__( self : List[Any] ,*A_ : Dict ,**A_ : Tuple ) -> Optional[int]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Tuple ,*A_ : int ,**A_ : Tuple ) -> Tuple:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : str ,**A_ : Dict ) -> str:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Tuple = ['''torch''']
def __init__( self : Tuple ,*A_ : Union[str, Any] ,**A_ : Optional[int] ) -> Any:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : Tuple ,**A_ : Union[str, Any] ) -> List[str]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Union[str, Any] ,**A_ : Optional[int] ) -> List[str]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = ['''torch''']
def __init__( self : Union[str, Any] ,*A_ : Dict ,**A_ : str ) -> Union[str, Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : Union[str, Any] ,**A_ : List[Any] ) -> Dict:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Tuple ,*A_ : str ,**A_ : Optional[Any] ) -> int:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = ['''torch''']
def __init__( self : List[Any] ,*A_ : Optional[Any] ,**A_ : str ) -> str:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : List[str] ,**A_ : str ) -> int:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Tuple ,*A_ : List[Any] ,**A_ : Tuple ) -> Dict:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = ['''torch''']
def __init__( self : List[str] ,*A_ : int ,**A_ : Tuple ) -> List[str]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Dict ,**A_ : List[Any] ) -> Union[str, Any]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Tuple ,*A_ : Optional[Any] ,**A_ : List[Any] ) -> Dict:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = ['''torch''']
def __init__( self : Union[str, Any] ,*A_ : Any ,**A_ : Dict ) -> List[Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : int ,**A_ : int ) -> str:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : str ,**A_ : List[str] ) -> Union[str, Any]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: int = ['''torch''']
def __init__( self : int ,*A_ : Any ,**A_ : Any ) -> int:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,*A_ : List[Any] ,**A_ : Tuple ) -> List[str]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : Tuple ,**A_ : str ) -> List[str]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Any = ['''torch''']
def __init__( self : Tuple ,*A_ : str ,**A_ : List[str] ) -> Any:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,*A_ : Any ,**A_ : Any ) -> int:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,*A_ : List[str] ,**A_ : Optional[Any] ) -> Any:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = ['''torch''']
def __init__( self : List[Any] ,*A_ : str ,**A_ : Union[str, Any] ) -> Any:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : Dict ,**A_ : Dict ) -> Dict:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : List[Any] ,**A_ : str ) -> Tuple:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = ['''torch''']
def __init__( self : Union[str, Any] ,*A_ : List[Any] ,**A_ : Optional[int] ) -> Optional[int]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : Tuple ,**A_ : Union[str, Any] ) -> Optional[int]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : Any ,**A_ : List[Any] ) -> Any:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = ['''torch''']
def __init__( self : str ,*A_ : Optional[Any] ,**A_ : Dict ) -> Union[str, Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : List[str] ,**A_ : List[str] ) -> int:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : List[str] ,**A_ : str ) -> List[str]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: str = ['''torch''']
def __init__( self : List[Any] ,*A_ : Dict ,**A_ : Any ) -> Dict:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : List[str] ,**A_ : List[Any] ) -> List[str]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : Dict ,**A_ : Optional[int] ) -> Tuple:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: str = ['''torch''']
def __init__( self : Optional[int] ,*A_ : List[Any] ,**A_ : int ) -> Any:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Union[str, Any] ,**A_ : Optional[Any] ) -> int:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,*A_ : Union[str, Any] ,**A_ : Union[str, Any] ) -> List[str]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: int = ['''torch''']
def __init__( self : int ,*A_ : Optional[Any] ,**A_ : int ) -> Dict:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Tuple ,*A_ : Any ,**A_ : str ) -> Dict:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : Any ,**A_ : str ) -> Tuple:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Dict = ['''torch''']
def __init__( self : Optional[Any] ,*A_ : Tuple ,**A_ : Dict ) -> List[str]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : Tuple ,**A_ : List[Any] ) -> Any:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : Optional[Any] ,**A_ : List[str] ) -> List[Any]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = ['''torch''']
def __init__( self : List[str] ,*A_ : Dict ,**A_ : List[Any] ) -> Optional[Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : Optional[Any] ,**A_ : Optional[int] ) -> Union[str, Any]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Union[str, Any] ,**A_ : Union[str, Any] ) -> Union[str, Any]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: int = ['''torch''']
def __init__( self : List[str] ,*A_ : Any ,**A_ : int ) -> int:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Tuple ,**A_ : Tuple ) -> Optional[int]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : List[Any] ,**A_ : str ) -> List[str]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Any = ['''torch''']
def __init__( self : Dict ,*A_ : Tuple ,**A_ : Tuple ) -> List[Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : int ,**A_ : Optional[Any] ) -> Tuple:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,*A_ : str ,**A_ : List[Any] ) -> Tuple:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = ['''torch''']
def __init__( self : Tuple ,*A_ : Any ,**A_ : Tuple ) -> List[Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : List[Any] ,**A_ : Optional[Any] ) -> int:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : Dict ,**A_ : Dict ) -> Optional[Any]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = ['''torch''']
def __init__( self : Any ,*A_ : Dict ,**A_ : Dict ) -> Tuple:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : Optional[Any] ,**A_ : Union[str, Any] ) -> List[str]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : Dict ,**A_ : Optional[int] ) -> Union[str, Any]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = ['''torch''']
def __init__( self : List[Any] ,*A_ : Any ,**A_ : Union[str, Any] ) -> Tuple:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Tuple ,*A_ : List[Any] ,**A_ : str ) -> str:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Optional[int] ,**A_ : Optional[Any] ) -> str:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Any = ['''torch''']
def __init__( self : Optional[int] ,*A_ : List[str] ,**A_ : int ) -> Optional[Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : Dict ,**A_ : List[Any] ) -> Any:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : Union[str, Any] ,**A_ : Any ) -> Tuple:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = ['''torch''']
def __init__( self : str ,*A_ : Optional[Any] ,**A_ : Optional[Any] ) -> Optional[int]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : Optional[int] ,**A_ : Optional[Any] ) -> Union[str, Any]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : int ,**A_ : str ) -> Optional[Any]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = ['''torch''']
def __init__( self : List[str] ,*A_ : Tuple ,**A_ : Tuple ) -> Any:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Optional[int] ,**A_ : int ) -> Any:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : str ,**A_ : Union[str, Any] ) -> Optional[int]:
        requires_backends(cls ,['torch'] )
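
# -----------------------------------------------------------------------------
# A simplified sketch (not the real transformers implementation) of the
# mechanism the dummies above rely on: `requires_backends` checks that each
# named backend is importable and raises an informative ImportError otherwise.
import importlib.util

def requires_backends_sketch(obj, backends):
    name = getattr(obj, '__name__', obj.__class__.__name__)
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the following backend(s): {', '.join(missing)}")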
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = GPTaTokenizer
UpperCAmelCase__ : Any = GPTaTokenizerFast
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : int = {"add_prefix_space": True}
UpperCAmelCase__ : Any = False
def _a ( self ) -> Optional[int]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCamelCase =[
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
__UpperCamelCase =dict(zip(A_ , range(len(A_ ) ) ) )
__UpperCamelCase =['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__UpperCamelCase ={'unk_token': '<unk>'}
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(A_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(A_ ) )
def _a ( self , **A_ ) -> str:
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **A_ )
def _a ( self , **A_ ) -> Optional[Any]:
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **A_ )
def _a ( self , A_ ) -> Tuple:
__UpperCamelCase ='lower newer'
__UpperCamelCase ='lower newer'
return input_text, output_text
def _a ( self ) -> List[Any]:
__UpperCamelCase =GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__UpperCamelCase ='lower newer'
__UpperCamelCase =['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
__UpperCamelCase =tokenizer.tokenize(A_ , add_prefix_space=A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =tokens + [tokenizer.unk_token]
__UpperCamelCase =[14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , A_ )
def _a ( self ) -> int:
if not self.test_rust_tokenizer:
return
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =self.get_rust_tokenizer(add_prefix_space=A_ )
__UpperCamelCase ='lower newer'
# Testing tokenization
__UpperCamelCase =tokenizer.tokenize(A_ , add_prefix_space=A_ )
__UpperCamelCase =rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
# Testing conversion to ids without special tokens
__UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ , add_prefix_space=A_ )
__UpperCamelCase =rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
# Testing conversion to ids with special tokens
__UpperCamelCase =self.get_rust_tokenizer(add_prefix_space=A_ )
__UpperCamelCase =tokenizer.encode(A_ , add_prefix_space=A_ )
__UpperCamelCase =rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
# Testing the unknown token
__UpperCamelCase =tokens + [rust_tokenizer.unk_token]
__UpperCamelCase =[14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A_ ) , A_ )
def _a ( self , *A_ , **A_ ) -> Optional[int]:
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def _a ( self , A_=15 ) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
# Simple input
__UpperCamelCase ='This is a simple input'
__UpperCamelCase =['This is a simple input 1', 'This is a simple input 2']
__UpperCamelCase =('This is a simple input', 'This is a pair')
__UpperCamelCase =[
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='max_length' )
# Simple input
self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='max_length' )
# Simple input
self.assertRaises(
A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='max_length' , )
# Pair input
self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='max_length' )
# Pair input
self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='max_length' )
# Pair input
self.assertRaises(
A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='max_length' , )
def _a ( self ) -> int:
__UpperCamelCase =GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
__UpperCamelCase ='This is a simple input'
__UpperCamelCase =['This is a simple input looooooooong', 'This is a simple input']
__UpperCamelCase =('This is a simple input', 'This is a pair')
__UpperCamelCase =[
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
__UpperCamelCase =tokenizer.pad_token_id
__UpperCamelCase =tokenizer(A_ , padding='max_length' , max_length=30 , return_tensors='np' )
__UpperCamelCase =tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors='np' )
__UpperCamelCase =tokenizer(*A_ , padding='max_length' , max_length=60 , return_tensors='np' )
__UpperCamelCase =tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase ='$$$'
__UpperCamelCase =GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=A_ , add_bos_token=A_ )
__UpperCamelCase ='This is a simple input'
__UpperCamelCase =['This is a simple input 1', 'This is a simple input 2']
__UpperCamelCase =tokenizer.bos_token_id
__UpperCamelCase =tokenizer(A_ )
__UpperCamelCase =tokenizer(A_ )
self.assertEqual(out_s.input_ids[0] , A_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__UpperCamelCase =tokenizer.decode(out_s.input_ids )
__UpperCamelCase =tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , A_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def _a ( self ) -> Optional[int]:
pass
def _a ( self ) -> Any:
# TODO: change to self.get_tokenizers() when the fast version is implemented
__UpperCamelCase =[self.get_tokenizer(do_lower_case=A_ , add_bos_token=A_ )]
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__UpperCamelCase ='Encode this.'
__UpperCamelCase ='This one too please.'
__UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ )
encoded_sequence += tokenizer.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode_plus(
A_ , A_ , add_special_tokens=A_ , return_special_tokens_mask=A_ , )
__UpperCamelCase =encoded_sequence_dict['input_ids']
__UpperCamelCase =encoded_sequence_dict['special_tokens_mask']
self.assertEqual(len(A_ ) , len(A_ ) )
__UpperCamelCase =[
(x if not special_tokens_mask[i] else None) for i, x in enumerate(A_ )
]
__UpperCamelCase =[x for x in filtered_sequence if x is not None]
self.assertEqual(A_ , A_ )
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Optional[Any]:
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
__UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=A_ )
__UpperCamelCase ='A photo of a cat'
__UpperCamelCase =tokenizer.encode(
A_ , )
self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('test_opt' )
__UpperCamelCase =AutoTokenizer.from_pretrained('./test_opt' )
__UpperCamelCase =tokenizer.encode(
A_ , )
self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] )
def _a ( self ) -> Dict:
__UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=A_ )
__UpperCamelCase ='A photo of a cat'
__UpperCamelCase =tokenizer.encode(
A_ , )
# Same as above
self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip('This test is failing because of a bug in the fast tokenizer' )
def _a ( self ) -> List[Any]:
__UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=A_ )
__UpperCamelCase ='bos'
__UpperCamelCase =tokenizer.get_vocab()['bos']
__UpperCamelCase ='A photo of a cat'
__UpperCamelCase =tokenizer.encode(
A_ , )
# We changed the bos token
self.assertEqual(A_ , [31957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('./tok' )
__UpperCamelCase =AutoTokenizer.from_pretrained('./tok' )
self.assertTrue(tokenizer.is_fast )
__UpperCamelCase =tokenizer.encode(
A_ , )
self.assertEqual(A_ , [31957, 250, 1345, 9, 10, 4758] )
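
# -----------------------------------------------------------------------------
# Standalone demo of the toy byte-level BPE vocabulary that setUp() above
# writes to disk (assumes `transformers` is installed); the expected tokens
# come straight from the first tokenization test.
import json
import os
import tempfile

from transformers import GPT2Tokenizer

vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l',
         '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer',
         '\u0120wider', '<unk>', '<|endoftext|>']
merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']

tmp = tempfile.mkdtemp()
vocab_path = os.path.join(tmp, 'vocab.json')
merges_path = os.path.join(tmp, 'merges.txt')
with open(vocab_path, 'w', encoding='utf-8') as fp:
    json.dump({tok: i for i, tok in enumerate(vocab)}, fp)
with open(merges_path, 'w', encoding='utf-8') as fp:
    fp.write('\n'.join(merges))

tok = GPT2Tokenizer(vocab_path, merges_path, unk_token='<unk>')
print(tok.tokenize('lower newer', add_prefix_space=True))
# -> ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er'], as asserted in the test above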
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
a_ : str = logging.get_logger(__name__)
a_ : List[Any] = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def a_ ( __snake_case : Dict , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
for attribute in key.split('''.''' ):
lowerCamelCase_ =getattr(__snake_case , __snake_case )
if weight_type is not None:
lowerCamelCase_ =getattr(__snake_case , __snake_case ).shape
else:
lowerCamelCase_ =hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
lowerCamelCase_ =value
elif weight_type == "weight_g":
lowerCamelCase_ =value
elif weight_type == "weight_v":
lowerCamelCase_ =value
elif weight_type == "bias":
lowerCamelCase_ =value
else:
lowerCamelCase_ =value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def a_ ( __snake_case : int , __snake_case : Optional[int] , __snake_case : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =fairseq_model.state_dict()
lowerCamelCase_ =hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
lowerCamelCase_ =False
if "conv_layers" in name:
load_conv_layer(
__snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == '''group''' , )
lowerCamelCase_ =True
else:
for key, mapped_key in MAPPING.items():
lowerCamelCase_ ='''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowerCamelCase_ =True
if "*" in mapped_key:
lowerCamelCase_ =name.split(__snake_case )[0].split('''.''' )[-2]
lowerCamelCase_ =mapped_key.replace('''*''' , __snake_case )
if "weight_g" in name:
lowerCamelCase_ ='''weight_g'''
elif "weight_v" in name:
lowerCamelCase_ ='''weight_v'''
elif "weight" in name:
lowerCamelCase_ ='''weight'''
elif "bias" in name:
lowerCamelCase_ ='''bias'''
else:
lowerCamelCase_ =None
set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(F'''Unused weights: {unused_weights}''' )
def a_ ( __snake_case : Tuple , __snake_case : Any , __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Dict ) -> str:
"""simple docstring"""
lowerCamelCase_ =full_name.split('''conv_layers.''' )[-1]
lowerCamelCase_ =name.split('''.''' )
lowerCamelCase_ =int(items[0] )
lowerCamelCase_ =int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
lowerCamelCase_ =value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
lowerCamelCase_ =value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
lowerCamelCase_ =value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
)
lowerCamelCase_ =value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__snake_case )
def a_ ( __snake_case : List[str] , __snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =SEWConfig()
if is_finetuned:
lowerCamelCase_ =model.wav_encoder.wav_model.cfg
else:
lowerCamelCase_ =model.cfg
lowerCamelCase_ =fs_config.conv_bias
lowerCamelCase_ =eval(fs_config.conv_feature_layers )
lowerCamelCase_ =[x[0] for x in conv_layers]
lowerCamelCase_ =[x[1] for x in conv_layers]
lowerCamelCase_ =[x[2] for x in conv_layers]
lowerCamelCase_ ='''gelu'''
lowerCamelCase_ ='''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group'''
lowerCamelCase_ =0.0
lowerCamelCase_ =fs_config.activation_fn.name
lowerCamelCase_ =fs_config.encoder_embed_dim
lowerCamelCase_ =0.0_2
lowerCamelCase_ =fs_config.encoder_ffn_embed_dim
lowerCamelCase_ =1e-5
lowerCamelCase_ =fs_config.encoder_layerdrop
lowerCamelCase_ =fs_config.encoder_attention_heads
lowerCamelCase_ =fs_config.conv_pos_groups
lowerCamelCase_ =fs_config.conv_pos
lowerCamelCase_ =len(__snake_case )
lowerCamelCase_ =fs_config.encoder_layers
lowerCamelCase_ =fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
lowerCamelCase_ =model.cfg
lowerCamelCase_ =fs_config.final_dropout
lowerCamelCase_ =fs_config.layerdrop
lowerCamelCase_ =fs_config.activation_dropout
lowerCamelCase_ =fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
lowerCamelCase_ =fs_config.attention_dropout
lowerCamelCase_ =fs_config.dropout_input
lowerCamelCase_ =fs_config.dropout
lowerCamelCase_ =fs_config.mask_channel_length
lowerCamelCase_ =fs_config.mask_channel_prob
lowerCamelCase_ =fs_config.mask_length
lowerCamelCase_ =fs_config.mask_prob
lowerCamelCase_ ='''Wav2Vec2FeatureExtractor'''
lowerCamelCase_ ='''Wav2Vec2CTCTokenizer'''
return config
@torch.no_grad()
def a_ ( __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : str=None , __snake_case : str=None , __snake_case : Dict=True ) -> List[str]:
"""simple docstring"""
if is_finetuned:
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
lowerCamelCase_ =SEWConfig.from_pretrained(__snake_case )
else:
lowerCamelCase_ =convert_config(model[0] , __snake_case )
lowerCamelCase_ =model[0].eval()
lowerCamelCase_ =True if config.feat_extract_norm == '''layer''' else False
lowerCamelCase_ =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , )
if is_finetuned:
if dict_path:
lowerCamelCase_ =Dictionary.load(__snake_case )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCamelCase_ =target_dict.pad_index
lowerCamelCase_ =target_dict.bos_index
lowerCamelCase_ =target_dict.pad_index
lowerCamelCase_ =target_dict.bos_index
lowerCamelCase_ =target_dict.eos_index
lowerCamelCase_ =len(target_dict.symbols )
lowerCamelCase_ =os.path.join(__snake_case , '''vocab.json''' )
if not os.path.isdir(__snake_case ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__snake_case ) )
return
os.makedirs(__snake_case , exist_ok=__snake_case )
with open(__snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices , __snake_case )
lowerCamelCase_ =WavaVecaCTCTokenizer(
__snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__snake_case , )
lowerCamelCase_ =WavaVecaProcessor(feature_extractor=__snake_case , tokenizer=__snake_case )
processor.save_pretrained(__snake_case )
lowerCamelCase_ =SEWForCTC(__snake_case )
else:
lowerCamelCase_ =SEWModel(__snake_case )
feature_extractor.save_pretrained(__snake_case )
recursively_load_weights(__snake_case , __snake_case , __snake_case )
hf_model.save_pretrained(__snake_case )
if __name__ == "__main__":
a_ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
a_ : List[Any] = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
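
# -----------------------------------------------------------------------------
# Toy illustration of the renaming performed in the weight-loading loop above:
# a fairseq parameter name is matched against the MAPPING table and the `*`
# wildcard is replaced by the layer index parsed out of the name. The sample
# parameter name below is made up for the demo.
DEMO_MAPPING = {'self_attn.k_proj': 'encoder.layers.*.attention.k_proj'}

def demo_rename(name):
    for key, mapped_key in DEMO_MAPPING.items():
        if key in name:
            layer_index = name.split(key)[0].split('.')[-2]  # index preceding the key
            return ('sew.' + mapped_key).replace('*', layer_index)
    return None

print(demo_rename('w2v_model.encoder.layers.3.self_attn.k_proj.weight'))
# -> sew.encoder.layers.3.attention.k_proj
# (the trailing `.weight` is resolved separately via the weight_type checks above)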
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ = None ) -> None:
if components is None:
__UpperCamelCase =[]
__UpperCamelCase =list(A_ )
def __len__( self ) -> int:
return len(self.__components )
def __str__( self ) -> str:
return "(" + ",".join(map(A_ , self.__components ) ) + ")"
def __add__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] + other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else:
raise Exception('must have the same size' )
def __sub__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] - other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self , A_ ) -> Vector:
...
@overload
def __mul__( self , A_ ) -> float:
...
def __mul__( self , A_ ) -> float | Vector:
if isinstance(A_ , (float, int) ):
__UpperCamelCase =[c * other for c in self.__components]
return Vector(A_ )
elif isinstance(A_ , A_ ) and len(self ) == len(A_ ):
__UpperCamelCase =len(self )
__UpperCamelCase =[self.__components[i] * other.component(A_ ) for i in range(A_ )]
return sum(A_ )
else: # error case
raise Exception('invalid operand!' )
def _a ( self ) -> Vector:
return Vector(self.__components )
def _a ( self , A_ ) -> float:
if isinstance(A_ , A_ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def _a ( self , A_ , A_ ) -> None:
assert -len(self.__components ) <= pos < len(self.__components )
__UpperCamelCase =value
def _a ( self ) -> float:
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
__UpperCamelCase =[c**2 for c in self.__components]
return math.sqrt(sum(A_ ) )
def _a ( self , A_ , A_ = False ) -> float:
__UpperCamelCase =self * other
__UpperCamelCase =self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
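
# -----------------------------------------------------------------------------
# The angle method above implements theta = arccos((x . y) / (|x| * |y|)).
# A self-contained check with plain lists, kept standalone because the
# obfuscated class bodies above are not runnable as written:
def _norm(v):
    return math.sqrt(sum(c * c for c in v))

def _demo_angle_deg(x, y):
    dot = sum(a * b for a, b in zip(x, y))
    return math.degrees(math.acos(dot / (_norm(x) * _norm(y))))

assert abs(_demo_angle_deg([1.0, 0.0], [0.0, 1.0]) - 90.0) < 1e-9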
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return Vector([0] * dimension )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ))
__UpperCamelCase =[0] * dimension
__UpperCamelCase =1
return Vector(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Vector , SCREAMING_SNAKE_CASE__ : Vector ):
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and (isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ))
)
return x * scalar + y
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
random.seed(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )]
return Vector(SCREAMING_SNAKE_CASE__ )
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_ , A_ ) -> None:
__UpperCamelCase =matrix
__UpperCamelCase =w
__UpperCamelCase =h
def __str__( self ) -> str:
__UpperCamelCase =''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] + other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
            raise Exception('matrices must have the same dimension!' )
def __sub__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] - other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self , A_ ) -> Matrix:
...
@overload
def __mul__( self , A_ ) -> Vector:
...
def __mul__( self , A_ ) -> Vector | Matrix:
if isinstance(A_ , A_ ): # matrix-vector
if len(A_ ) == self.__width:
__UpperCamelCase =zero_vector(self.__height )
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] * other.component(A_ )
for j in range(self.__width )
]
ans.change_component(A_ , sum(A_ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(A_ , (int, float) ): # matrix-scalar
__UpperCamelCase =[
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(A_ , self.__width , self.__height )
return None
def _a ( self ) -> int:
return self.__height
def _a ( self ) -> int:
return self.__width
def _a ( self , A_ , A_ ) -> float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
            raise Exception('component: indices out of bounds' )
def _a ( self , A_ , A_ , A_ ) -> None:
if 0 <= x < self.__height and 0 <= y < self.__width:
__UpperCamelCase =value
else:
raise Exception('change_component: indices out of bounds' )
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
__UpperCamelCase =self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(A_ ) ):
__UpperCamelCase =minor[i][:y] + minor[i][y + 1 :]
return Matrix(A_ , self.__width - 1 , self.__height - 1 ).determinant()
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(A_ , A_ )
else:
raise Exception('Indices out of bounds' )
def _a ( self ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__UpperCamelCase =[
self.__matrix[0][y] * self.cofactor(0 , A_ ) for y in range(self.__width )
]
return sum(A_ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =[[0] * n for _ in range(SCREAMING_SNAKE_CASE__ )]
return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
random.seed(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[
[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ )
]
return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
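
# -----------------------------------------------------------------------------
# Self-contained sketch of the Laplace (cofactor) expansion that
# Matrix.determinant() above implements, kept standalone because the class
# bodies carry obfuscated local names:
def _demo_determinant(m):
    n = len(m)
    if n == 1:
        return m[0][0]
    total = 0.0
    for y in range(n):
        minor = [row[:y] + row[y + 1:] for row in m[1:]]  # drop row 0 and column y
        total += (-1) ** y * m[0][y] * _demo_determinant(minor)
    return total

assert _demo_determinant([[1, 2], [3, 4]]) == -2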
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'configuration_longformer': [
'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LongformerConfig',
'LongformerOnnxConfig',
],
'tokenization_longformer': ['LongformerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['LongformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongformerForMaskedLM',
'LongformerForMultipleChoice',
'LongformerForQuestionAnswering',
'LongformerForSequenceClassification',
'LongformerForTokenClassification',
'LongformerModel',
'LongformerPreTrainedModel',
'LongformerSelfAttention',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLongformerForMaskedLM',
'TFLongformerForMultipleChoice',
'TFLongformerForQuestionAnswering',
'TFLongformerForSequenceClassification',
'TFLongformerForTokenClassification',
'TFLongformerModel',
'TFLongformerPreTrainedModel',
'TFLongformerSelfAttention',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
_A = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
_A = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : dict[int, list[int]] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[bool] ):
__UpperCamelCase =True
__UpperCamelCase =[]
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
order.append(SCREAMING_SNAKE_CASE__ )
return order
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : dict[int, list[int]] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[bool] ):
__UpperCamelCase =True
__UpperCamelCase =[vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return component
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : dict[int, list[int]] ):
__UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) * [False]
__UpperCamelCase ={vert: [] for vert in range(len(SCREAMING_SNAKE_CASE__ ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[]
for i, was_visited in enumerate(SCREAMING_SNAKE_CASE__ ):
if not was_visited:
order += topology_sort(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[]
__UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) * [False]
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
__UpperCamelCase =order[len(SCREAMING_SNAKE_CASE__ ) - i - 1]
if not visited[vert]:
__UpperCamelCase =find_components(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
components_list.append(SCREAMING_SNAKE_CASE__ )
return components_list
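
# -----------------------------------------------------------------------------
# Standalone sketch of Kosaraju's algorithm, which the three functions above
# implement together: DFS finish order on the graph, then DFS on the reversed
# graph in reverse finish order. Kept self-contained because the obfuscated
# locals above no longer line up (note also that both test graphs at the top
# are bound to `_A`, so the second assignment shadows the first).
def kosaraju(graph: dict[int, list[int]]) -> list[list[int]]:
    n = len(graph)
    visited = [False] * n
    order: list[int] = []

    def dfs(v: int) -> None:  # record vertices in DFS finish order
        visited[v] = True
        for w in graph[v]:
            if not visited[w]:
                dfs(w)
        order.append(v)

    for v in range(n):
        if not visited[v]:
            dfs(v)

    reversed_graph: dict[int, list[int]] = {v: [] for v in range(n)}
    for v, neighbours in graph.items():
        for w in neighbours:
            reversed_graph[w].append(v)

    visited = [False] * n
    components: list[list[int]] = []

    def collect(v: int, component: list[int]) -> None:  # DFS on the reversed graph
        visited[v] = True
        component.append(v)
        for w in reversed_graph[v]:
            if not visited[w]:
                collect(w, component)

    for v in reversed(order):
        if not visited[v]:
            component: list[int] = []
            collect(v, component)
            components.append(component)
    return components

print(kosaraju({0: [1], 1: [2], 2: [0], 3: [4], 4: []}))  # [[3], [4], [0, 2, 1]]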
"""simple docstring"""
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    """Breadth-first search for an augmenting path from ``s``; fills ``parent`` along the way."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Ford-Fulkerson with BFS augmenting paths; returns the edges saturated by the max flow."""
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float('Inf')
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
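    # Added note: for the sample network above this prints [(1, 3), (4, 3), (4, 5)];
    # the capacities of those saturated edges (12 + 7 + 4) equal the maximum flow, 23.
    # (Hand-checked against a BFS/Edmonds-Karp run; not part of the original file.)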
| 77 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
    },
    'monolingual_vocab_file': {
        'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'vinai/bartpho-syllable': 1024}


class BartphoTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token='<s>',
        eos_token='</s>',
        sep_token='</s>',
        cls_token='<s>',
        unk_token='<unk>',
        pad_token='<pad>',
        mask_token='<mask>',
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, 'r', encoding='utf-8') as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self) -> Any:
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> Any:
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        out_monolingual_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] , )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, 'w', encoding='utf-8') as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f'{str(token)} \n')
        return out_vocab_file, out_monolingual_vocab_file
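
# Hypothetical usage sketch (added for illustration; the checkpoint id comes from
# PRETRAINED_VOCAB_FILES_MAP above, the sample Vietnamese sentence is made up):
#   tokenizer = BartphoTokenizer.from_pretrained('vinai/bartpho-syllable')
#   ids = tokenizer('Chúng tôi là những nghiên cứu viên.')['input_ids']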
| 62 | 0 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs) -> Any:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs) -> Union[str, Any]:
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs) -> List[str]:
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs) -> int:
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self) -> Optional[int]:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self) -> List[Any]:
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self) -> Tuple:
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning, )
        return self.image_processor
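
# Hypothetical usage sketch (added; model id and the `image` variable are illustrative):
#   processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')
#   inputs = processor(text=['a photo of a cat'], images=image, return_tensors='pt', padding=True)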
| 78 |
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0):
    """Probability density of the normal distribution N(mu, sigma**2) at ``x``."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
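
# Example values (added for illustration): the standard normal density peaks at x = 0
# with 1 / sqrt(2 * pi) ≈ 0.3989422804014327, so gaussian(0) ≈ 0.39894 and
# gaussian(2, mu=2, sigma=3) ≈ 0.13298 (= 0.39894 / 3).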
| 62 | 0 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        '''simple docstring'''
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
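
    # Worked example (added for illustration): with shortest_edge=18, a 600x400 (w x h)
    # input maps to expected_height=18 and expected_width=int(18 * 600 / 400)=27; batched
    # inputs take the per-axis max because the processor pads to the largest image.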
@require_torch
@require_vision
class _UpperCAmelCase ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
snake_case = DetaImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
_A = DetaImageProcessingTester(self )
@property
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase , "image_mean" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "image_std" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "do_rescale" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "do_pad" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "size" ) )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
_A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , __UpperCAmelCase )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A , _A = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
_A = image_processing(__UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A = image_processing(__UpperCAmelCase , return_tensors="pt" ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A = image_processing(__UpperCAmelCase , return_tensors="pt" ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
_A = json.loads(f.read() )
_A = {"image_id": 39769, "annotations": target}
# encode them
_A = DetaImageProcessor()
_A = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , return_tensors="pt" )
# verify pixel values
_A = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , __UpperCAmelCase )
_A = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) )
# verify area
_A = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __UpperCAmelCase ) )
# verify boxes
_A = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __UpperCAmelCase )
_A = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __UpperCAmelCase , atol=1E-3 ) )
# verify image_id
_A = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __UpperCAmelCase ) )
# verify is_crowd
_A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __UpperCAmelCase ) )
# verify class_labels
_A = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __UpperCAmelCase ) )
# verify orig_size
_A = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __UpperCAmelCase ) )
# verify size
_A = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __UpperCAmelCase ) )
@slow
def lowerCAmelCase ( self : int ):
'''simple docstring'''
_A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
_A = json.loads(f.read() )
_A = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
_A = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
_A = DetaImageProcessor(format="coco_panoptic" )
_A = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , masks_path=__UpperCAmelCase , return_tensors="pt" )
# verify pixel values
_A = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , __UpperCAmelCase )
_A = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) )
# verify area
_A = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __UpperCAmelCase ) )
# verify boxes
_A = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __UpperCAmelCase )
_A = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __UpperCAmelCase , atol=1E-3 ) )
# verify image_id
_A = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __UpperCAmelCase ) )
# verify is_crowd
_A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __UpperCAmelCase ) )
# verify class_labels
_A = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __UpperCAmelCase ) )
# verify masks
_A = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __UpperCAmelCase )
# verify orig_size
_A = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __UpperCAmelCase ) )
# verify size
_A = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __UpperCAmelCase ) )
| 79 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}')
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs) -> Union[str, Any]:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        do_convert_rgb=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name='size', default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
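
# Hypothetical usage sketch (added; the `image` variable is illustrative):
#   image_processor = CLIPImageProcessor()  # 224px shortest-edge resize + 224x224 center crop
#   pixel_values = image_processor(images=image, return_tensors='pt')['pixel_values']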
| 62 | 0 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    '''simple docstring'''
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})
@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42
@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)
@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)
@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])
@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)
@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])
if is_python_no_less_than_3_10:
    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None
    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a, b):
        """Checks that two argparsers are equivalent, ignoring the `container` backreference."""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]
            self.assertEqual(xx, yy)
    def test_basic(self):
        parser = HfArgumentParser(BasicExample)
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)
        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
def __a ( self ):
UpperCamelCase__ = HfArgumentParser(a )
UpperCamelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=42 , type=a )
expected.add_argument("--baz" , default="toto" , type=a , help="help message" )
self.argparsersEqual(a , a )
def __a ( self ):
UpperCamelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=a , default=a , const=a , nargs="?" )
expected.add_argument("--baz" , type=a , default=a , const=a , nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("--no_baz" , action="store_false" , default=a , dest="baz" )
expected.add_argument("--opt" , type=a , default=a )
UpperCamelCase__ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(a )
for dataclass_type in dataclass_types:
UpperCamelCase__ = HfArgumentParser(a )
self.argparsersEqual(a , a )
UpperCamelCase__ = parser.parse_args([] )
self.assertEqual(a , Namespace(foo=a , baz=a , opt=a ) )
UpperCamelCase__ = parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(a , Namespace(foo=a , baz=a , opt=a ) )
UpperCamelCase__ = parser.parse_args(["--foo", "--baz"] )
self.assertEqual(a , Namespace(foo=a , baz=a , opt=a ) )
UpperCamelCase__ = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(a , Namespace(foo=a , baz=a , opt=a ) )
UpperCamelCase__ = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(a , Namespace(foo=a , baz=a , opt=a ) )
def __a ( self ):
UpperCamelCase__ = HfArgumentParser(a )
UpperCamelCase__ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(a , a )
UpperCamelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
UpperCamelCase__ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
UpperCamelCase__ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
UpperCamelCase__ = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
UpperCamelCase__ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
UpperCamelCase__ = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __a ( self ):
@dataclass
class lowercase_ :
__UpperCAmelCase = "toto"
UpperCamelCase__ = HfArgumentParser(a )
UpperCamelCase__ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(a , a )
UpperCamelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
UpperCamelCase__ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
UpperCamelCase__ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
def __a ( self ):
UpperCamelCase__ = HfArgumentParser(a )
UpperCamelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo_int" , nargs="+" , default=[] , type=a )
expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=a )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=a )
expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=a )
self.argparsersEqual(a , a )
UpperCamelCase__ = parser.parse_args([] )
self.assertEqual(
a , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , )
UpperCamelCase__ = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(a , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) )
def __a ( self ):
UpperCamelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=a , type=a )
expected.add_argument("--bar" , default=a , type=a , help="help message" )
expected.add_argument("--baz" , default=a , type=a )
expected.add_argument("--ces" , nargs="+" , default=[] , type=a )
expected.add_argument("--des" , nargs="+" , default=[] , type=a )
UpperCamelCase__ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(a )
for dataclass_type in dataclass_types:
UpperCamelCase__ = HfArgumentParser(a )
self.argparsersEqual(a , a )
UpperCamelCase__ = parser.parse_args([] )
self.assertEqual(a , Namespace(foo=a , bar=a , baz=a , ces=[] , des=[] ) )
UpperCamelCase__ = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(a , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) )
def __a ( self ):
UpperCamelCase__ = HfArgumentParser(a )
UpperCamelCase__ = argparse.ArgumentParser()
expected.add_argument("--required_list" , nargs="+" , type=a , required=a )
expected.add_argument("--required_str" , type=a , required=a )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=a , )
self.argparsersEqual(a , a )
def __a ( self ):
UpperCamelCase__ = HfArgumentParser(a )
UpperCamelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=a , required=a )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=a , )
expected.add_argument("--opt" , type=a , default=a )
expected.add_argument("--baz" , default="toto" , type=a , help="help message" )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=a )
self.argparsersEqual(a , a )
def __a ( self ):
UpperCamelCase__ = HfArgumentParser(a )
UpperCamelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
UpperCamelCase__ = parser.parse_dict(a )[0]
UpperCamelCase__ = BasicExample(**a )
self.assertEqual(a , a )
def __a ( self ):
UpperCamelCase__ = HfArgumentParser(a )
UpperCamelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
"extra": 42,
}
self.assertRaises(a , parser.parse_dict , a , allow_extra_keys=a )
def __a ( self ):
UpperCamelCase__ = HfArgumentParser(a )
UpperCamelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = os.path.join(a , "temp_json" )
os.mkdir(a )
with open(temp_local_path + ".json" , "w+" ) as f:
json.dump(a , a )
UpperCamelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0]
UpperCamelCase__ = BasicExample(**a )
self.assertEqual(a , a )
def __a ( self ):
UpperCamelCase__ = HfArgumentParser(a )
UpperCamelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = os.path.join(a , "temp_yaml" )
os.mkdir(a )
with open(temp_local_path + ".yaml" , "w+" ) as f:
yaml.dump(a , a )
UpperCamelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
UpperCamelCase__ = BasicExample(**a )
self.assertEqual(a , a )
def __a ( self ):
UpperCamelCase__ = HfArgumentParser(a )
self.assertIsNotNone(a )
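
# Minimal usage sketch (added; mirrors the dataclasses defined above):
#   parser = HfArgumentParser(BasicExample)
#   (example,) = parser.parse_args_into_dataclasses(["--foo", "1", "--bar", "0.5", "--baz", "x", "--flag", "true"])
#   assert example.foo == 1 and example.bar == 0.5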
| 80 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'yolos'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ) -> Any:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1E-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
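
# Hypothetical usage sketch (added; the asserted values are the defaults defined above):
#   config = YolosConfig()
#   assert config.num_detection_tokens == 100 and config.image_size == [512, 864]
#   config.save_pretrained('./yolos-config')  # writes config.json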
| 62 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    """simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ) -> Union[str, Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self) -> Optional[Any]:
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self) -> Tuple:
        return NezhaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder(self) -> Tuple:
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A , __A , __A ) -> Tuple:
a =NezhaModel(config=__A )
model.to(__A )
model.eval()
a =model(__A , attention_mask=__A , token_type_ids=__A )
a =model(__A , token_type_ids=__A )
a =model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ) -> List[Any]:
a =True
a =NezhaModel(__A )
model.to(__A )
model.eval()
a =model(
__A , attention_mask=__A , token_type_ids=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , )
a =model(
__A , attention_mask=__A , token_type_ids=__A , encoder_hidden_states=__A , )
a =model(__A , attention_mask=__A , token_type_ids=__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A , __A , __A ) -> List[Any]:
a =NezhaForMaskedLM(config=__A )
model.to(__A )
model.eval()
a =model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A , __A , __A ) -> int:
a =NezhaForNextSentencePrediction(config=__A )
model.to(__A )
model.eval()
a =model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A , __A , __A ) -> Any:
a =NezhaForPreTraining(config=__A )
model.to(__A )
model.eval()
a =model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , next_sentence_label=__A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A , __A , __A ) -> List[str]:
a =NezhaForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
a =model(
__A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A , __A , __A ) -> Tuple:
a =self.num_labels
a =NezhaForSequenceClassification(__A )
model.to(__A )
model.eval()
a =model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A , __A , __A ) -> Union[str, Any]:
a =self.num_labels
a =NezhaForTokenClassification(config=__A )
model.to(__A )
model.eval()
a =model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A , __A , __A ) -> Union[str, Any]:
a =self.num_choices
a =NezhaForMultipleChoice(config=__A )
model.to(__A )
model.eval()
a =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a =model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> Union[str, Any]:
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
        return inputs_dict
    def setUp(self) -> Tuple:
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def SCREAMING_SNAKE_CASE ( self ) -> int:
a =self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__A )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__A )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__A )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*__A )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__A )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__A )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__A )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__A )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Any:
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a =NezhaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE ( self ) -> str:
a , a =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
a =True
a =model_class(config=__A )
a =self._prepare_for_class(__A , __A )
a =torch.jit.trace(
__A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__A , os.path.join(__A , '''bert.pt''' ) )
a =torch.jit.load(os.path.join(__A , '''bert.pt''' ) , map_location=__A )
loaded(inputs_dict['''input_ids'''].to(__A ) , inputs_dict['''attention_mask'''].to(__A ) )
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
a =NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' )
a =torch.tensor([[0, 1, 2, 3, 4, 5]] )
a =torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
a =model(__A , attention_mask=__A )[0]
a =torch.Size((1, 6, 768) )
self.assertEqual(output.shape , __A )
a =torch.tensor([[[0.0_685, 0.2_441, 0.1_102], [0.0_600, 0.1_906, 0.1_349], [0.0_221, 0.0_819, 0.0_586]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __A , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a =NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' )
a =torch.tensor([[0, 1, 2, 3, 4, 5]] )
a =torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
a =model(__A , attention_mask=__A )[0]
a =torch.Size((1, 6, 2_1128) )
self.assertEqual(output.shape , __A )
a =torch.tensor(
[[-2.7_939, -1.7_902, -2.2_189], [-2.8_585, -1.8_908, -2.3_723], [-2.6_499, -1.7_750, -2.2_558]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __A , atol=1E-4 ) ) | 81 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_vivit'] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vivit'] = [
        'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'VivitModel',
        'VivitPreTrainedModel',
        'VivitForVideoClassification',
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
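
# Added note (illustrative): with the lazy module registered in sys.modules, an import like
# `from transformers.models.vivit import VivitModel` defers loading modeling_vivit — and its
# torch dependency — until the attribute is first accessed.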
| 62 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    """simple docstring"""
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())} )
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."} )
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )


def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
            """ --overwrite_output_dir to overcome.""" )
    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("""Training/evaluation parameters %s""" , training_args)
    # Set seed
    set_seed(training_args.seed)
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("""Task not found: %s""" % (data_args.task_name))
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions , axis=1)
        return {"acc": simple_accuracy(preds , p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , """eval_results.txt""")
        if trainer.is_world_master():
            with open(output_eval_file , """w""") as writer:
                logger.info("""***** Eval results *****""")
                for key, value in result.items():
                    logger.info(""" %s = %s""" , key , value)
                    writer.write("""%s = %s\n""" % (key, value))
                results.update(result)
    return results


def _mp_fn(index):
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 82 |
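The training script above is driven entirely by dataclass-declared arguments. A compact sketch of that HfArgumentParser pattern with a hypothetical TrainArgs dataclass (TrainArgs is an illustration, not a transformers class):

from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser


@dataclass
class TrainArgs:
    model_name_or_path: str = field(metadata={"help": "Model identifier or local path"})
    max_seq_length: int = field(default=128, metadata={"help": "Truncate/pad inputs to this length"})
    cache_dir: Optional[str] = field(default=None, metadata={"help": "Optional cache directory"})


if __name__ == "__main__":
    # e.g. python sketch.py --model_name_or_path bert-base-uncased --max_seq_length 256
    (args,) = HfArgumentParser(TrainArgs).parse_args_into_dataclasses()
    print(args)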
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update
    def left(self, idx: int) -> int:
        return idx * 2
    def right(self, idx: int) -> int:
        return idx * 2 + 1
    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True
    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)
    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
| 62 | 0 |
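The lazy segment tree above supports range assignment and range-max queries; a quick way to validate it is to mirror every operation on a plain list. A sketch assuming the SegmentTree class from the block (indices are 1-based and inclusive):

import random


def brute_force_check(trials: int = 100, n: int = 15) -> None:
    arr = [random.randint(-50, 50) for _ in range(n)]
    segt = SegmentTree(n)
    segt.build(1, 1, n, arr)
    for _ in range(trials):
        lo = random.randint(1, n)
        hi = random.randint(lo, n)
        if random.random() < 0.5:
            val = random.randint(-50, 50)
            segt.update(1, 1, n, lo, hi, val)
            for i in range(lo - 1, hi):
                arr[i] = val  # mirror the range assignment on the plain list
        else:
            assert segt.query(1, 1, n, lo, hi) == max(arr[lo - 1 : hi])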
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDMaDPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=6, out_channels=6, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_ddim(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262])
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs['prompt'] = 3 * [inputs['prompt']]
        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop('prompt')]
        text_inputs = ldmad_pipe.tokenizer(
            prompt, padding='max_length', max_length=ldmad_pipe.tokenizer.model_max_length, truncation=True, return_tensors='pt', )
        text_inputs = text_inputs['input_ids'].to(torch_device)
        prompt_embeds = ldmad_pipe.text_encoder(text_inputs)[0]
        inputs['prompt_embeds'] = prompt_embeds
        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]
        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = PNDMScheduler(skip_prk_steps=True)
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = 'french fries'
        output = ldmad_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217])
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDMaDPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device='cpu', dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            'prompt': 'a photograph of an astronaut riding a horse',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
    def test_ldm3d_stable_diffusion(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d')
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = rgb[0, -3:, -1].flatten()
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)
        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706])
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706])
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device='cpu', dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            'prompt': 'a photograph of an astronaut riding a horse',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 50,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
    def test_ldm3d(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d').to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
    def test_ldm3d_v2(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d-4c').to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
| 83 |
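The tests above pin randomness with a seeded torch.Generator so that output slices can be compared against hard-coded values. The core of that pattern in isolation:

import torch


def reproducible_latents(seed: int = 0, shape=(1, 4, 64, 64)) -> torch.Tensor:
    generator = torch.Generator(device="cpu").manual_seed(seed)
    return torch.randn(shape, generator=generator)


assert torch.equal(reproducible_latents(0), reproducible_latents(0))  # same seed, same noise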
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    titles = soup.find_all('td', attrs='titleColumn')
    ratings = soup.find_all('td', class_='ratingColumn imdbRating')
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }
def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, 'w', newline='') as out_file:
        writer = csv.writer(out_file)
        writer.writerow(['Movie title', 'IMDb rating'])
        for title, rating in movies.items():
            writer.writerow([title, rating])
if __name__ == "__main__":
write_movies()
| 62 | 0 |
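The scraper above pairs titleColumn and ratingColumn cells with zip. The same parsing logic run offline against a static HTML snippet, so it works without network access or a live IMDb page layout:

from bs4 import BeautifulSoup

html = """
<table>
  <tr><td class="titleColumn"><a>Movie A</a></td>
      <td class="ratingColumn imdbRating"><strong>9.2</strong></td></tr>
  <tr><td class="titleColumn"><a>Movie B</a></td>
      <td class="ratingColumn imdbRating"><strong>8.7</strong></td></tr>
</table>
"""
soup = BeautifulSoup(html, "html.parser")
titles = soup.find_all("td", class_="titleColumn")
ratings = soup.find_all("td", class_="ratingColumn imdbRating")
print({t.a.text: float(r.strong.text) for t, r in zip(titles, ratings)})  # {'Movie A': 9.2, 'Movie B': 8.7}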
"""simple docstring"""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"""Converting TensorFlow checkpoint from {tf_path}""")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("""/""")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"""Skipping non-model layer {full_name}""")
            continue
        if "optimizer" in full_name:
            logger.info(f"""Skipping optimization layer {full_name}""")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("""layer_with_weights"""):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("""/""".join(name))
        arrays.append(array)
    logger.info(f"""Read a total of {len(arrays):,} layers""")
    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"""Found layer names with different depths (layer depth {list(set(layer_depth))})""")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            """The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
            """ heads.""")
    # convert layers
    logger.info("""Converting weights...""")
    for full_name, array in zip(names, arrays):
        name = full_name.split("""/""")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("""layer_with_weights"""):
                layer_num = int(m_name.split("""-""")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["""embeddings""", """LayerNorm"""])
                    pointer = getattr(pointer, """embeddings""")
                    pointer = getattr(pointer, """LayerNorm""")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["""encoder""", """layer""", str(layer_num - 4)])
                    pointer = getattr(pointer, """encoder""")
                    pointer = getattr(pointer, """layer""")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["""pooler""", """dense"""])
                    pointer = getattr(pointer, """pooler""")
                    pointer = getattr(pointer, """dense""")
            elif m_name == "embeddings":
                trace.append("""embeddings""")
                pointer = getattr(pointer, """embeddings""")
                if layer_num == 0:
                    trace.append("""word_embeddings""")
                    pointer = getattr(pointer, """word_embeddings""")
                elif layer_num == 1:
                    trace.append("""position_embeddings""")
                    pointer = getattr(pointer, """position_embeddings""")
                elif layer_num == 2:
                    trace.append("""token_type_embeddings""")
                    pointer = getattr(pointer, """token_type_embeddings""")
                else:
                    raise ValueError(f"""Unknown embedding layer with name {full_name}""")
                trace.append("""weight""")
                pointer = getattr(pointer, """weight""")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["""attention""", """self"""])
                pointer = getattr(pointer, """attention""")
                pointer = getattr(pointer, """self""")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["""attention""", """output""", """LayerNorm"""])
                pointer = getattr(pointer, """attention""")
                pointer = getattr(pointer, """output""")
                pointer = getattr(pointer, """LayerNorm""")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["""attention""", """output""", """dense"""])
                pointer = getattr(pointer, """attention""")
                pointer = getattr(pointer, """output""")
                pointer = getattr(pointer, """dense""")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["""output""", """dense"""])
                pointer = getattr(pointer, """output""")
                pointer = getattr(pointer, """dense""")
            elif m_name == "_output_layer_norm":
                # output dense
                trace.extend(["""output""", """LayerNorm"""])
                pointer = getattr(pointer, """output""")
                pointer = getattr(pointer, """LayerNorm""")
            elif m_name == "_key_dense":
                # attention key
                trace.append("""key""")
                pointer = getattr(pointer, """key""")
            elif m_name == "_query_dense":
                # attention query
                trace.append("""query""")
                pointer = getattr(pointer, """query""")
            elif m_name == "_value_dense":
                # attention value
                trace.append("""value""")
                pointer = getattr(pointer, """value""")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["""intermediate""", """dense"""])
                pointer = getattr(pointer, """intermediate""")
                pointer = getattr(pointer, """dense""")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.append("""output""")
                pointer = getattr(pointer, """output""")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("""bias""")
                pointer = getattr(pointer, """bias""")
            elif m_name in ["kernel", "gamma"]:
                trace.append("""weight""")
                pointer = getattr(pointer, """weight""")
            else:
                logger.warning(f"""Ignored {m_name}""")
        # for certain layers reshape is necessary
        trace = """.""".join(trace)
        if re.match(r"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""", trace) or re.match(
            r"""(\S+)\.attention\.output\.dense\.weight""", trace):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"""
                f""" {array.shape}""")
        logger.info(f"""Successfully set variable {full_name} to PyTorch layer {trace}""")
    return model
def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(f"""Loading model based on config from {config_path}...""")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)
    # Load weights from checkpoint
    logger.info(f"""Loading weights from checkpoint {tf_checkpoint_path}...""")
    load_tf2_weights_in_bert(model, tf_checkpoint_path, config)
    # Save pytorch-model
    logger.info(f"""Saving PyTorch model to {pytorch_dump_path}...""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.'
    )
    parser.add_argument(
        '--bert_config_file',
        type=str,
        required=True,
        help='The config json file corresponding to the BERT model. This specifies the model architecture.',
    )
    parser.add_argument(
        '--pytorch_dump_path',
        type=str,
        required=True,
        help='Path to the output PyTorch model (must include filename).',
    )
    args = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 84 |
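The converter above maps TF2 variable names onto PyTorch modules by walking the name path. A useful first debugging step is simply listing what the checkpoint contains; a tiny sketch (the checkpoint path is a placeholder):

import tensorflow as tf


def list_checkpoint_variables(tf_checkpoint_path: str) -> None:
    # Print every variable name and shape stored in a TF2 checkpoint.
    for name, shape in tf.train.list_variables(tf_checkpoint_path):
        print(f"{name}: {shape}")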
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class InstructBlipVisionConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "instructblip_vision_model"
    def __init__( self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=1E-6, attention_dropout=0.0, initializer_range=1E-10, qkv_bias=True, **kwargs, ) -> Tuple:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained( cls, pretrained_model_name_or_path, **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type') == "instructblip":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "instructblip_qformer"
    def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs, ) -> Optional[Any]:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained( cls, pretrained_model_name_or_path, **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type') == "instructblip":
            config_dict = config_dict['qformer_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "instructblip"
    is_composition = True
    def __init__( self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs ) -> List[str]:
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.')
        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.')
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs( cls, vision_config, qformer_config, text_config, **kwargs, ) -> Optional[Any]:
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, )
    def to_dict( self ) -> Optional[Any]:
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 62 | 0 |
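A short round-trip showing how the composite config above is meant to be used — sub-configs in, dict out, config back. Assumes a transformers release that ships the InstructBlip configuration classes:

from transformers import InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig

vision = InstructBlipVisionConfig()
qformer = InstructBlipQFormerConfig()
composite = InstructBlipConfig(vision_config=vision.to_dict(), qformer_config=qformer.to_dict())
rebuilt = InstructBlipConfig.from_dict(composite.to_dict())  # serialize and rebuild
print(rebuilt.num_query_tokens)  # 32 by default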
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ["text", ["text"]]
    outputs = ["text"]
    def setup(self) -> Any:
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")
    def encode(self, text, labels) -> Optional[int]:
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [F'This example is {label}' for label in labels], return_tensors="pt", padding="max_length", )
    def decode(self, outputs) -> Optional[Any]:
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
| 85 |
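The tool above ranks labels by the NLI entailment logit; transformers exposes the same idea directly as the zero-shot-classification pipeline. Minimal usage (downloads the bart-large-mnli checkpoint on first run):

from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier("I loved this film", candidate_labels=["positive", "negative"])
print(result["labels"][0])  # most likely label first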
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder('>')
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, 'Please use tf.data to implement this functionality.')
def _extract_images(f):
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                'Invalid magic number %d in MNIST image file: %s' % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, 'Please use tf.one_hot on tensors.')
def _dense_to_one_hot(labels_dense, num_classes):
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, 'Please use tf.data to implement this functionality.')
def _extract_labels(f, one_hot=False, num_classes=10):
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                'Invalid magic number %d in MNIST label file: %s' % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None, 'Please use alternatives such as official/mnist/_DataSet.py'
        ' from tensorflow/models.', )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None, ) -> Optional[int]:
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f'images.shape: {images.shape} labels.shape: {labels.shape}'
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self) -> Tuple:
        return self._images
    @property
    def labels(self) -> Union[str, Any]:
        return self._labels
    @property
    def num_examples(self) -> Optional[Any]:
        return self._num_examples
    @property
    def epochs_completed(self) -> List[str]:
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True) -> Optional[Any]:
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, 'Please write your own downloading logic.')
def _maybe_download(filename, work_directory, source_url):
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print('Successfully downloaded', filename, size, 'bytes.')
    return filepath
@deprecated(
    None, 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')')
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, validation_size=5000, seed=None, source_url=DEFAULT_SOURCE_URL, ):
    if fake_data:
        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)
        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = 'train-images-idx3-ubyte.gz'
    train_labels_file = 'train-labels-idx1-ubyte.gz'
    test_images_file = 't10k-images-idx3-ubyte.gz'
    test_labels_file = 't10k-labels-idx1-ubyte.gz'
    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, 'rb') as f:
        train_images = _extract_images(f)
    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, 'rb') as f:
        train_labels = _extract_labels(f, one_hot=one_hot)
    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, 'rb') as f:
        test_images = _extract_images(f)
    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, 'rb') as f:
        test_labels = _extract_labels(f, one_hot=one_hot)
    if not 0 <= validation_size <= len(train_images):
        msg = (
            'Validation size should be between 0 and '
            F'{len(train_images)}. Received: {validation_size}.'
        )
        raise ValueError(msg)
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {'dtype': dtype, 'reshape': reshape, 'seed': seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
| 62 | 0 |
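A no-download smoke test for the loader above, using its fake-data path (the directory argument is unused in this mode):

datasets = read_data_sets("/tmp/mnist-unused", fake_data=True, one_hot=True)
images, labels = datasets.train.next_batch(4, fake_data=True)
print(len(images), len(labels))  # 4 4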
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_xlm_roberta""": [
"""XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaConfig""",
"""XLMRobertaOnnxConfig""",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""XLMRobertaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""XLMRobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
"""XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaForCausalLM""",
"""XLMRobertaForMaskedLM""",
"""XLMRobertaForMultipleChoice""",
"""XLMRobertaForQuestionAnswering""",
"""XLMRobertaForSequenceClassification""",
"""XLMRobertaForTokenClassification""",
"""XLMRobertaModel""",
"""XLMRobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
"""TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMRobertaForCausalLM""",
"""TFXLMRobertaForMaskedLM""",
"""TFXLMRobertaForMultipleChoice""",
"""TFXLMRobertaForQuestionAnswering""",
"""TFXLMRobertaForSequenceClassification""",
"""TFXLMRobertaForTokenClassification""",
"""TFXLMRobertaModel""",
"""TFXLMRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
"""FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxXLMRobertaForMaskedLM""",
"""FlaxXLMRobertaForCausalLM""",
"""FlaxXLMRobertaForMultipleChoice""",
"""FlaxXLMRobertaForQuestionAnswering""",
"""FlaxXLMRobertaForSequenceClassification""",
"""FlaxXLMRobertaForTokenClassification""",
"""FlaxXLMRobertaModel""",
"""FlaxXLMRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 86 |
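The repeated try/except blocks above gate exports on optional backends. The availability check itself reduces to a find_spec probe; a minimal sketch with hypothetical export names:

import importlib.util


def is_backend_available(name: str) -> bool:
    return importlib.util.find_spec(name) is not None


exports = ["XLMRobertaConfig"]  # the config is always importable
if is_backend_available("torch"):
    exports.append("XLMRobertaModel")  # torch-backed symbols only when torch is installed
print(exports)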
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self) -> Union[str, Any]:
        super().setUp()
        vocab_tokens = [
            '<unk>',
            '[CLS]',
            '[SEP]',
            'want',
            'unwanted',
            'wa',
            'un',
            'running',
            ',',
            'low',
            'l',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs) -> Optional[int]:
        kwargs['lower_case'] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer) -> Tuple:
        input_text = '<unk> UNwanted , running'
        output_text = '<unk> unwanted, running'
        return input_text, output_text
    def test_full_tokenizer(self) -> str:
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)
        tokens = tokenizer.tokenize('<unk> UNwanted , running')
        self.assertListEqual(tokens, ['<unk>', 'unwanted', ',', 'running'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
    def test_full_tokenizer_lower(self) -> Any:
        tokenizer = TransfoXLTokenizer(lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? '), ['hello', '!', 'how', 'are', 'you', '?'])
    def test_full_tokenizer_no_lower(self) -> Optional[int]:
        tokenizer = TransfoXLTokenizer(lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? '), ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])
    def test_full_tokenizer_moses_numbers(self) -> int:
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
        tokens_out = [
            'Hello',
            '(',
            'bracket',
            ')',
            'and',
            'side',
            '@-@',
            'scrolled',
            '[',
            'and',
            ']',
            'Henry',
            '\'s',
            '$',
            '5',
            '@,@',
            '000',
            'with',
            '3',
            '@.@',
            '34',
            'm',
            '.',
            'What',
            '\'s',
            'up',
            '!',
            '?',
        ]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
    def test_move_added_token(self) -> Optional[int]:
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)
        tokenizer.add_tokens(['new1', 'new2'])
        tokenizer.move_added_token('new1', 1)
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode('new1'), [1])
        self.assertEqual(tokenizer.decode([1]), 'new1')
| 62 | 0 |
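A standalone usage sketch of the vocab-file round-trip the test above relies on; assumes a transformers version that still ships the TransfoXL tokenizer:

import os
import tempfile

from transformers.models.transfo_xl.tokenization_transfo_xl import TransfoXLTokenizer

with tempfile.TemporaryDirectory() as tmp:
    vocab_file = os.path.join(tmp, "vocab.txt")
    with open(vocab_file, "w", encoding="utf-8") as f:
        f.write("\n".join(["<unk>", "want", "unwanted", ",", "running"]))
    tokenizer = TransfoXLTokenizer(vocab_file=vocab_file, lower_case=True)
    print(tokenizer.tokenize("<unk> UNwanted , running"))  # ['<unk>', 'unwanted', ',', 'running']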
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs) -> Tuple:
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs) -> Optional[int]:
        return super().__call__(images, **kwargs)
    def _sanitize_parameters(self, **kwargs) -> Optional[Any]:
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}.") -> Tuple:
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs) -> Optional[int]:
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs) -> List[Any]:
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''')
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 87 |
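The class above backs the zero-shot-image-classification pipeline task; hypothesis_template turns each candidate label into a CLIP text prompt. Minimal usage — the checkpoint and test-image URL are commonly used HF assets, adjust as needed:

from transformers import pipeline

clf = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
preds = clf(
    "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
    candidate_labels=["animals", "vehicles"],
    hypothesis_template="This is a photo of {}.",
)
print(preds[0]["label"])  # highest-scoring label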
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_convbert_fast'] = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convbert'] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convbert'] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 0 |
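The init file above relies on transformers' `_LazyModule` to defer heavy submodule imports until first attribute access. Below is a small standalone sketch of that idea; the class and helper names here are illustrative, not the actual `_LazyModule` implementation.

import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, item):
        if item in self._symbol_to_module:
            # import the submodule only now, on first attribute access
            submodule = importlib.import_module("." + self._symbol_to_module[item], self.__name__)
            return getattr(submodule, item)
        raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")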
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
'''simple docstring'''
    def __init__(
        self ,
        parent ,
        batch_size=2 ,
        num_channels=3 ,
        image_size=4 ,
        patch_size=2 ,
        text_seq_length=7 ,
        is_training=True ,
        use_input_mask=True ,
        use_token_type_ids=True ,
        use_labels=True ,
        vocab_size=99 ,
        hidden_size=36 ,
        num_hidden_layers=3 ,
        num_attention_heads=4 ,
        intermediate_size=37 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=512 ,
        type_vocab_size=16 ,
        type_sequence_label_size=2 ,
        initializer_range=0.02 ,
        coordinate_size=6 ,
        shape_size=6 ,
        num_labels=3 ,
        num_choices=4 ,
        scope=None ,
        range_bbox=1000 ,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        model = LayoutLMvaModel(config=config )
        model.to(torch_device )
        model.eval()

        # text + image
        result = model(input_ids , pixel_values=pixel_values )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

        # text only
        result = model(input_ids )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )

        # image only
        result = model(pixel_values=pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def create_and_check_for_sequence_classification(
        self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification(
        self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
    def create_and_check_for_question_answering(
        self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        model = LayoutLMvaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        return True
    def setUp(self ):
        self.model_tester = LayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37 )
    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
            inputs_dict = {
                k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
                if isinstance(v , torch.Tensor ) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING ):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=torch_device , )

        return inputs_dict
    def test_config(self ):
        self.config_tester.run_common_tests()

    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_sequence_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    def test_for_question_answering(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    @slow
    def test_model_from_pretrained(self ):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor(self ):
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
    @slow
    def test_inference_no_head(self ):
        model = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(torch_device )

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors="""pt""" ).pixel_values.to(torch_device )

        input_ids = torch.tensor([[1, 2]] )
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device ) , bbox=bbox.to(torch_device ) , pixel_values=pixel_values.to(torch_device ) , )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768) )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(torch_device )

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ) )
| 88 |
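The nested loop that makes `bbox` "legal" in the tester above can also be written as one vectorized operation. A small sketch; the helper name is ours, not part of the test suite.

import torch

def legalize_bboxes(bbox: torch.Tensor) -> torch.Tensor:
    # ensure x0 <= x1 and y0 <= y1 for every (x0, y0, x1, y1) box
    x0 = torch.minimum(bbox[..., 0], bbox[..., 2])
    y0 = torch.minimum(bbox[..., 1], bbox[..., 3])
    x1 = torch.maximum(bbox[..., 0], bbox[..., 2])
    y1 = torch.maximum(bbox[..., 1], bbox[..., 3])
    return torch.stack([x0, y0, x1, y1], dim=-1)

print(legalize_bboxes(torch.tensor([[8, 2, 3, 7], [1, 9, 4, 5]])))  # -> [[3, 2, 8, 7], [1, 5, 4, 9]]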
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
_A = logging.getLogger(__name__)
def accuracy(out , labels ):
    outputs = np.argmax(out , axis=1 )
    return np.sum(outputs == labels )
def load_rocstories_dataset(dataset_path ):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path , encoding='utf_8' ) as f:
        f = csv.reader(f )
        output = []
        next(f ) # skip the first line
        for line in tqdm(f ):
            output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output
def pre_process_datasets(encoded_datasets , input_len , cap_length , start_token , delimiter_token , clf_token ):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)

    To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset )
        input_ids = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
        mc_token_ids = np.zeros((n_batch, 2) , dtype=np.int64 )
        lm_labels = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.int64 )
        mc_labels = np.zeros((n_batch,) , dtype=np.int64 )
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset ):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1 )] = with_cont1
            input_ids[i, 1, : len(with_cont2 )] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1 ) - 1
            mc_token_ids[i, 1] = len(with_cont2 ) - 1
            lm_labels[i, 0, : len(with_cont1 )] = with_cont1
            lm_labels[i, 1, : len(with_cont2 )] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t ) for t in all_inputs ) )
    return tensor_datasets
def main():
__UpperCamelCase =argparse.ArgumentParser()
parser.add_argument('--model_name' , type=SCREAMING_SNAKE_CASE__ , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=SCREAMING_SNAKE_CASE__ , default='' )
parser.add_argument('--eval_dataset' , type=SCREAMING_SNAKE_CASE__ , default='' )
parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE__ , default=42 )
parser.add_argument('--num_train_epochs' , type=SCREAMING_SNAKE_CASE__ , default=3 )
parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=8 )
parser.add_argument('--eval_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=16 )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=SCREAMING_SNAKE_CASE__ , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=SCREAMING_SNAKE_CASE__ , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=SCREAMING_SNAKE_CASE__ , help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=SCREAMING_SNAKE_CASE__ , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE__ , default=6.25E-5 )
parser.add_argument('--warmup_steps' , default=0 , type=SCREAMING_SNAKE_CASE__ , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=SCREAMING_SNAKE_CASE__ , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=SCREAMING_SNAKE_CASE__ , default=0.01 )
parser.add_argument('--lm_coef' , type=SCREAMING_SNAKE_CASE__ , default=0.9 )
parser.add_argument('--n_valid' , type=SCREAMING_SNAKE_CASE__ , default=3_74 )
parser.add_argument('--server_ip' , type=SCREAMING_SNAKE_CASE__ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=SCREAMING_SNAKE_CASE__ , default='' , help='Can be used for distant debugging.' )
    args = parser.parse_args()
print(SCREAMING_SNAKE_CASE__ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE__ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
    n_gpu = torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ['_start_', '_delimiter_', '_classify_']
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name )
    tokenizer.add_tokens(special_tokens )
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens )
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
    model.resize_token_embeddings(len(tokenizer ) )
    model.to(device )
# Load and encode the datasets
    def tokenize_and_encode(obj ):
        if isinstance(obj , str ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj ) )
        elif isinstance(obj , int ):
            return obj
        return [tokenize_and_encode(o ) for o in obj]
logger.info('Encoding dataset...' )
    train_dataset = load_rocstories_dataset(args.train_dataset )
    eval_dataset = load_rocstories_dataset(args.eval_dataset )
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets )
# Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length] ) + max(len(cont1[:max_length] ) , len(cont2[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset )
    input_length = min(input_length , model.config.n_positions ) # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets , input_length , max_length , *special_tokens_ids )
    train_tensor_dataset , eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    train_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.train_batch_size )
    eval_data = TensorDataset(*eval_tensor_dataset )
    eval_sampler = SequentialSampler(eval_data )
    eval_dataloader = DataLoader(eval_data , sampler=eval_sampler , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader ) // args.gradient_accumulation_steps ) + 1
        else:
            t_total = len(train_dataloader ) // args.gradient_accumulation_steps * args.num_train_epochs
        param_optimizer = list(model.named_parameters() )
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
                'weight_decay': args.weight_decay,
            },
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters , lr=args.learning_rate , eps=args.adam_epsilon )
        scheduler = get_linear_schedule_with_warmup(
            optimizer , num_warmup_steps=args.warmup_steps , num_training_steps=t_total )
if args.do_train:
        nb_tr_steps , tr_loss , exp_average_loss = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader , desc='Training' )
            for step, batch in enumerate(tqdm_bar ):
                batch = tuple(t.to(device ) for t in batch )
                input_ids , mc_token_ids , lm_labels , mc_labels = batch
                losses = model(input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels )
                loss = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
                exp_average_loss = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
                tqdm_bar.desc = 'Training loss: {:.2e} lr: {:.2e}'.format(exp_average_loss , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model , 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir , WEIGHTS_NAME )
        output_config_file = os.path.join(args.output_dir , CONFIG_NAME )
        torch.save(model_to_save.state_dict() , output_model_file )
        model_to_save.config.to_json_file(output_config_file )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
        model.to(device )
if args.do_eval:
model.eval()
        eval_loss , eval_accuracy = 0, 0
        nb_eval_steps , nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader , desc='Evaluating' ):
            batch = tuple(t.to(device ) for t in batch )
            input_ids , mc_token_ids , lm_labels , mc_labels = batch
            with torch.no_grad():
                _ , mc_loss , _ , mc_logits = model(
                    input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to('cpu' ).numpy()
            tmp_eval_accuracy = accuracy(mc_logits , mc_labels )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
        output_eval_file = os.path.join(args.output_dir , 'eval_results.txt' )
        with open(output_eval_file , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
                logger.info(' %s = %s' , key , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 62 | 0 |
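For clarity, here is a tiny worked example of the packing `pre_process_datasets` performs: every story yields two candidate rows, and the multiple-choice head reads the hidden state at the `[clf]` position. The token ids below are made up for illustration.

import numpy as np

story, cont1, cont2 = [10, 11, 12], [20, 21], [30, 31]
start, delim, clf = 1, 2, 3

with_cont1 = [start] + story + [delim] + cont1 + [clf]
with_cont2 = [start] + story + [delim] + cont2 + [clf]

input_ids = np.zeros((1, 2, 16), dtype=np.int64)
mc_token_ids = np.zeros((1, 2), dtype=np.int64)
input_ids[0, 0, : len(with_cont1)] = with_cont1
input_ids[0, 1, : len(with_cont2)] = with_cont2
mc_token_ids[0] = [len(with_cont1) - 1, len(with_cont2) - 1]  # positions of [clf]

print(input_ids)
print(mc_token_ids)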
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def load_orig_config_file(config_path ):
    print('Loading config file...' )

    def flatten_yaml_as_dict(d , parent_key='' , sep='.' ):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v , collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(v , new_key , sep=sep ).items() )
            else:
                items.append((new_key, v) )
        return dict(items )

    config = argparse.Namespace()
    with open(config_path , 'r' ) as yaml_file:
        try:
            cfg = yaml.load(yaml_file , Loader=yaml.FullLoader )
            flat_cfg = flatten_yaml_as_dict(cfg )
            for k, v in flat_cfg.items():
                setattr(config , k , v )
        except yaml.YAMLError as exc:
            logger.error('Error while loading config file: {}. Error message: {}'.format(config_path , str(exc ) ) )
    return config
def get_mobilevitva_config(task_name , orig_cfg_file ):
    config = MobileViTVaConfig()

    is_segmentation_model = False

    # dataset
    if task_name.startswith('imagenet1k_' ):
        config.num_labels = 1000
        if int(task_name.strip().split('_' )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = 'imagenet-1k-id2label.json'
    elif task_name.startswith('imagenet21k_to_1k_' ):
        config.num_labels = 21000
        if int(task_name.strip().split('_' )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = 'imagenet-22k-id2label.json'
    elif task_name.startswith('ade20k_' ):
        config.num_labels = 151
        config.image_size = 512
        filename = 'ade20k-id2label.json'
        is_segmentation_model = True
    elif task_name.startswith('voc_' ):
        config.num_labels = 21
        config.image_size = 512
        filename = 'pascal-voc-id2label.json'
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file )
    assert getattr(orig_config , 'model.classification.name' , -1 ) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config , 'model.classification.mitv2.width_multiplier' , 1.0 )
    assert (
        getattr(orig_config , 'model.classification.mitv2.attn_norm_layer' , -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config , 'model.classification.activation.name' , 'swish' )
    # config.image_size == getattr(orig_config,  'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config , 'model.segmentation.output_stride' , 16 )
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config , 'model.segmentation.deeplabv3.aspp_rates' , [12, 24, 36] )
            config.aspp_out_channels = getattr(orig_config , 'model.segmentation.deeplabv3.aspp_out_channels' , 512 )
            config.aspp_dropout_prob = getattr(orig_config , 'model.segmentation.deeplabv3.aspp_dropout' , 0.1 )

    # id2label
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys(state_dict , base_model=False ):
    if base_model:
        model_prefix = ''
    else:
        model_prefix = 'mobilevitv2.'

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace('.block.' , '.' )
        if ".conv." in k:
            k_new = k_new.replace('.conv.' , '.convolution.' )
        if ".norm." in k:
            k_new = k_new.replace('.norm.' , '.normalization.' )

        if "conv_1." in k:
            k_new = k_new.replace('conv_1.' , f"""{model_prefix}conv_stem.""" )
        for i in [1, 2]:
            if f"""layer_{i}.""" in k:
                k_new = k_new.replace(f"""layer_{i}.""" , f"""{model_prefix}encoder.layer.{i-1}.layer.""" )
        if ".exp_1x1." in k:
            k_new = k_new.replace('.exp_1x1.' , '.expand_1x1.' )
        if ".red_1x1." in k:
            k_new = k_new.replace('.red_1x1.' , '.reduce_1x1.' )
        for i in [3, 4, 5]:
            if f"""layer_{i}.0.""" in k:
                k_new = k_new.replace(f"""layer_{i}.0.""" , f"""{model_prefix}encoder.layer.{i-1}.downsampling_layer.""" )
            if f"""layer_{i}.1.local_rep.0.""" in k:
                k_new = k_new.replace(f"""layer_{i}.1.local_rep.0.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_kxk.""" )
            if f"""layer_{i}.1.local_rep.1.""" in k:
                k_new = k_new.replace(f"""layer_{i}.1.local_rep.1.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_1x1.""" )

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"""layer_{i}.1.global_rep.{j}.""" in k:
                    k_new = k_new.replace(
                        f"""layer_{i}.1.global_rep.{j}.""" , f"""{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.""" )
            if f"""layer_{i}.1.global_rep.{j+1}.""" in k:
                k_new = k_new.replace(
                    f"""layer_{i}.1.global_rep.{j+1}.""" , f"""{model_prefix}encoder.layer.{i-1}.layernorm.""" )

            if f"""layer_{i}.1.conv_proj.""" in k:
                k_new = k_new.replace(f"""layer_{i}.1.conv_proj.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_projection.""" )

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' )
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace('pre_norm_attn.1.' , 'attention.' )
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' )
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' )
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' )

        if "classifier.1." in k:
            k_new = k_new.replace('classifier.1.' , 'classifier.' )

        if "seg_head." in k:
            k_new = k_new.replace('seg_head.' , 'segmentation_head.' )
        if ".aspp_layer." in k:
            k_new = k_new.replace('.aspp_layer.' , '.' )
        if ".aspp_pool." in k:
            k_new = k_new.replace('.aspp_pool.' , '.' )

        rename_keys.append((k, k_new) )
    return rename_keys
def remove_unused_keys(state_dict ):
    """remove unused keys (e.g.: seg_head.aux_head)"""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith('seg_head.aux_head.' ):
            keys_to_ignore.append(k )
    for k in keys_to_ignore:
        state_dict.pop(k , None )
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name , checkpoint_path , orig_config_path , pytorch_dump_folder_path ):
    config = get_mobilevitva_config(task_name , orig_config_path )

    # load original state_dict
    checkpoint = torch.load(checkpoint_path , map_location='cpu' )

    # load huggingface model
    if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ):
        model = MobileViTVaForSemanticSegmentation(config ).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config ).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict )
    rename_keys = create_rename_keys(state_dict , base_model=base_model )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )

    # load modified state_dict
    model.load_state_dict(state_dict )

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors='pt' )
    outputs = model(**encoding )

    # verify classification model
    if task_name.startswith('imagenet' ):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1 ).item()
        print('Predicted class:' , model.config.id2label[predicted_class_idx] )
        if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] )
            assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )

    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model {task_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
__lowerCAmelCase = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 89 |
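The conversion script's core trick is the rename-then-copy pass over the checkpoint. A minimal, self-contained sketch of that pattern follows; the example keys are invented for illustration.

import torch

def apply_rename_keys(state_dict, rename_keys):
    # move each tensor from its original key to its HF-style key
    for old_key, new_key in rename_keys:
        state_dict[new_key] = state_dict.pop(old_key)
    return state_dict

sd = {"conv_1.block.weight": torch.zeros(3)}
print(apply_rename_keys(sd, [("conv_1.block.weight", "conv_stem.convolution.weight")]))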
def solution(min_total: int = 10**12 ) -> int:
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(f"""{solution() = }""")
| 62 | 0 |
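A quick sanity check of the Pell-style recurrence above, using the classic small case from Project Euler 100: the first arrangement with more than 20 discs in total is 15 blue out of 21, where P(two blue) = 15/21 × 14/20 = 1/2. Assuming `solution` from the listing above is in scope:

blue, total = solution(20), 21
assert blue == 15
# two successive blue draws are an even chance: b(b-1) / (n(n-1)) == 1/2
assert 2 * blue * (blue - 1) == total * (total - 1)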
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls , common , init_noise_sigma , timesteps ):
        return cls(common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps )


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput ):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin , ConfigMixin ):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self ) -> bool:
        return True
@register_to_config
    def __init__(
        self ,
        num_train_timesteps: int = 1_000 ,
        beta_start: float = 0.00_01 ,
        beta_end: float = 0.02 ,
        beta_schedule: str = "linear" ,
        trained_betas: Optional[jnp.ndarray] = None ,
        variance_type: str = "fixed_small" ,
        clip_sample: bool = True ,
        prediction_type: str = "epsilon" ,
        dtype: jnp.dtype = jnp.float32 ,
    ):
        self.dtype = dtype
    def create_state(self , common: Optional[CommonSchedulerState] = None ) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self )

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0 , dtype=self.dtype )

        timesteps = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]

        return DDPMSchedulerState.create(
            common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps , )
    def scale_model_input(
        self , state: DDPMSchedulerState , sample: jnp.ndarray , timestep: Optional[int] = None ) -> jnp.ndarray:
        return sample
    def set_timesteps(
        self , state: DDPMSchedulerState , num_inference_steps: int , shape: Tuple = () ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0 , num_inference_steps ) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps , timesteps=timesteps , )
    def _get_variance(self , state: DDPMSchedulerState , t , predicted_variance=None , variance_type=None ):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance , a_min=1e-20 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance , a_min=1e-20 ) )
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self ,
        state: DDPMSchedulerState ,
        model_output: jnp.ndarray ,
        timestep: int ,
        sample: jnp.ndarray ,
        key: Optional[jax.random.KeyArray] = None ,
        return_dict: bool = True ,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0 )

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output , predicted_variance = jnp.split(model_output , sample.shape[1] , axis=1 )
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
' for the FlaxDDPMScheduler.' )
# 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
        def random_variance():
            split_key = jax.random.split(key , num=1 )
            noise = jax.random.normal(split_key , shape=model_output.shape , dtype=self.dtype )
            return (self._get_variance(state , t , predicted_variance=predicted_variance ) ** 0.5) * noise

        variance = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )

        pred_prev_sample = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample , state=state )
    def add_noise(
        self ,
        state: DDPMSchedulerState ,
        original_samples: jnp.ndarray ,
        noise: jnp.ndarray ,
        timesteps: jnp.ndarray ,
    ) -> jnp.ndarray:
        return add_noise_common(state.common , original_samples , noise , timesteps )

    def get_velocity(
        self ,
        state: DDPMSchedulerState ,
        sample: jnp.ndarray ,
        noise: jnp.ndarray ,
        timesteps: jnp.ndarray ,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common , sample , noise , timesteps )

    def __len__(self ):
        return self.config.num_train_timesteps
| 90 |
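The coefficients in the scheduler's `step` come from Eq. (7) of the DDPM paper (Ho et al., 2020). A minimal standalone JAX sketch of that posterior mean, assuming a linear beta schedule purely for illustration:

import jax.numpy as jnp

def ddpm_posterior_mean(sample, pred_x0, t, betas):
    alphas = 1.0 - betas
    alphas_cumprod = jnp.cumprod(alphas)
    alpha_prod_t = alphas_cumprod[t]
    alpha_prod_t_prev = jnp.where(t > 0, alphas_cumprod[t - 1], 1.0)
    beta_prod_t = 1.0 - alpha_prod_t
    # Eq. (7): coefficients for predicted x_0 and the current sample x_t
    coeff_x0 = (alpha_prod_t_prev ** 0.5 * betas[t]) / beta_prod_t
    coeff_xt = alphas[t] ** 0.5 * (1.0 - alpha_prod_t_prev) / beta_prod_t
    return coeff_x0 * pred_x0 + coeff_xt * sample

betas = jnp.linspace(1e-4, 0.02, 1000)
print(ddpm_posterior_mean(jnp.ones(3), jnp.zeros(3), 10, betas))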
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_vit_mae'] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_vit_mae'] = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 0 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__(self ):
        self.node_position = []

    def get_position(self , vertex ):
        return self.node_position[vertex]

    def set_position(self , vertex , pos ):
        self.node_position[vertex] = pos

    def top_to_bottom(self , heap , start , size , positions ):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp , tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child] , positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start] , positions[start] = temp, tempa

                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , temp )

                self.top_to_bottom(heap , smallest_child , size , positions )

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self , val , index , heap , position ):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )

    def heapify(self , heap , positions ):
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )

    def delete_minimum(self , heap , positions ):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp


def prisms_algorithm(adjacency_list ):
    heap = Heap()

    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )

    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
UpperCAmelCase_ : List[str] = int(input("""Enter number of edges: """).strip())
UpperCAmelCase_ : Optional[int] = defaultdict(list)
for _ in range(edges_number):
UpperCAmelCase_ : str = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 91 |
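Example run of the Prim implementation above on a small weighted graph, assuming `prisms_algorithm` from the listing is in scope; the adjacency list mirrors the input format read in the `__main__` block, and the expected MST is edges (0, 1), (1, 2), (2, 3) with total weight 6.

from collections import defaultdict

adjacency_list = defaultdict(list)
for u, v, w in [(0, 1, 1), (0, 2, 4), (1, 2, 2), (1, 3, 6), (2, 3, 3)]:
    adjacency_list[u].append([v, w])
    adjacency_list[v].append([u, w])

print(prisms_algorithm(adjacency_list))  # expected: [(0, 1), (1, 2), (2, 3)]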
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_jukebox'] = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 0 |
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""")
def onnx_export(
    model ,
    model_args: tuple ,
    output_path: Path ,
    ordered_input_names ,
    output_names ,
    dynamic_axes ,
    opset ,
    use_external_data_format=False ,
):
    output_path.parent.mkdir(parents=True , exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model ,
            model_args ,
            f=output_path.as_posix() ,
            input_names=ordered_input_names ,
            output_names=output_names ,
            dynamic_axes=dynamic_axes ,
            do_constant_folding=True ,
            use_external_data_format=use_external_data_format ,
            enable_onnx_checker=True ,
            opset_version=opset ,
        )
    else:
        export(
            model ,
            model_args ,
            f=output_path.as_posix() ,
            input_names=ordered_input_names ,
            output_names=output_names ,
            dynamic_axes=dynamic_axes ,
            do_constant_folding=True ,
            opset_version=opset ,
        )
@torch.no_grad()
def convert_models(model_path: str , output_path: str , opset: int , fp16: bool = False ):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA" )
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path , torch_dtype=dtype ).to(device )
    output_path = Path(output_path )
    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt" , padding="max_length" , max_length=pipeline.tokenizer.model_max_length , truncation=True , return_tensors="pt" , )
    onnx_export(
        pipeline.text_encoder , model_args=(text_input.input_ids.to(device=device , dtype=torch.int32 )) , output_path=output_path / "text_encoder" / "model.onnx" , ordered_input_names=["input_ids"] , output_names=["last_hidden_state", "pooler_output"] , dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        } , opset=opset , )
del pipeline.text_encoder
    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet , model_args=(
            torch.randn(2 , unet_in_channels , unet_sample_size , unet_sample_size ).to(device=device , dtype=dtype ),
            torch.randn(2 ).to(device=device , dtype=dtype ),
            torch.randn(2 , num_tokens , text_hidden_size ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=unet_path , ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"] , output_names=["out_sample"] , dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        } , opset=opset , use_external_data_format=True , )
__lowerCAmelCase = str(unet_path.absolute().as_posix() )
__lowerCAmelCase = os.path.dirname(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = onnx.load(SCREAMING_SNAKE_CASE_ )
# clean up existing tensor files
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
os.mkdir(SCREAMING_SNAKE_CASE_ )
# collate external tensor files into one
onnx.save_model(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , save_as_external_data=SCREAMING_SNAKE_CASE_ , all_tensors_to_one_file=SCREAMING_SNAKE_CASE_ , location="weights.pb" , convert_attribute=SCREAMING_SNAKE_CASE_ , )
del pipeline.unet
# VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample , return_dict : vae_encoder.encode(sample , return_dict )[0].sample()
    onnx_export(
        vae_encoder , model_args=(
            torch.randn(1 , vae_in_channels , vae_sample_size , vae_sample_size ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=output_path / "vae_encoder" / "model.onnx" , ordered_input_names=["sample", "return_dict"] , output_names=["latent_sample"] , dynamic_axes={
        "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
    } , opset=opset , )
# VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder , model_args=(
            torch.randn(1 , vae_latent_channels , unet_sample_size , unet_sample_size ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=output_path / "vae_decoder" / "model.onnx" , ordered_input_names=["latent_sample", "return_dict"] , output_names=["sample"] , dynamic_axes={
        "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
    } , opset=opset , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker , model_args=(
                torch.randn(
                    1 , clip_num_channels , clip_image_size , clip_image_size , ).to(device=device , dtype=dtype ),
                torch.randn(1 , vae_sample_size , vae_sample_size , vae_out_channels ).to(device=device , dtype=dtype ),
            ) , output_path=output_path / "safety_checker" / "model.onnx" , ordered_input_names=["clip_input", "images"] , output_names=["out_images", "has_nsfw_concepts"] , dynamic_axes={
            "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
        } , opset=opset , )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker" )
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None
    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder" ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder" ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder" ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / "unet" ) , scheduler=pipeline.scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , requires_safety_checker=safety_checker is not None , )
    onnx_pipeline.save_pretrained(output_path )
    print("ONNX pipeline saved to" , output_path )
    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path , provider="CPUExecutionProvider" )
print("ONNX pipeline is loadable" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=14,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
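    # Example invocation (a sketch -- the flags mirror the parser above, but the
    # script name and paths are placeholders, not from the source):
    #   python convert_checkpoint_to_onnx.py --model_path <model-id-or-local-dir> \
    #       --output_path ./sd_onnx --opset 14 --fp16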
| 92 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
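# With this pattern, importing the package module is cheap: the tokenizer
# submodule listed in _import_structure is only imported on first attribute
# access, via the _LazyModule proxy installed in sys.modules above.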
| 62 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Any = logging.get_logger(__name__)
_lowercase : int = {
"naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig( PretrainedConfig ):
    model_type = '''donut-swin'''
    attribute_map = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
    def __init__( self , image_size=2_24 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1E-5 , **kwargs , ):
        """Configuration for the Donut Swin encoder; the defaults mirror naver-clova-ix/donut-base."""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
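        # Sanity check on the derived size: with the default embed_dim=96 and
        # depths=[2, 2, 6, 2], hidden_size = 96 * 2 ** (4 - 1) = 768 -- the
        # channel width after the final Swin stage.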
| 93 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType( Protocol ):
    """Anything with a per-sample process() method can be plotted below."""

    def process( self , sample : float ) -> float:
        return 0.0


def get_bounds( fft_results : np.ndarray , samplerate : int ):
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest


def show_frequency_response( filter_type : FilterType , samplerate : int ):
    size = 5_12
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs ) )
    fft_db = 20 * np.log10(fft_out )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('Frequency (Hz)' )
    plt.xscale('log' )
    # Display within reasonable bounds
    bounds = get_bounds(fft_db , samplerate )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel('Gain (dB)' )
    plt.plot(fft_db )
    plt.show()


def show_phase_response( filter_type : FilterType , samplerate : int ):
    size = 5_12
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_angle = np.angle(np.fft.fft(outputs ) )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('Frequency (Hz)' )
    plt.xscale('log' )
    plt.ylim(-2 * pi , 2 * pi )
    plt.ylabel('Phase shift (Radians)' )
    plt.plot(np.unwrap(fft_angle , -2 * pi ) )
    plt.show()
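# Minimal sketch of a filter satisfying the FilterType protocol above
# (purely illustrative -- an all-pass "identity" filter, not from the source):
#
#     class IdentityFilter:
#         def process(self, sample: float) -> float:
#             return sample
#
#     show_frequency_response(IdentityFilter(), 48000)
#
# The impulse response of the identity filter transforms to 1 at every bin,
# so the plot is a flat 0 dB line (20 * log10(1) == 0).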
| 62 | 0 |
def fizz_buzz( number : int , iterations : int ):
    """Play FizzBuzz: return the space-separated terms starting at `number` up to `iterations`."""
    if not isinstance(iterations , int ):
        raise ValueError('''iterations must be defined as integers''' )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError('''starting number must be an integer and be more than 0''' )
    if not iterations >= 1:
        raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
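    # Example (hypothetical invocation, consistent with the checks above):
    #   fizz_buzz(1, 15)
    #   -> "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "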
| 94 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_sew'] = [
        'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SEWForCTC',
        'SEWForSequenceClassification',
        'SEWModel',
        'SEWPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 0 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
UpperCAmelCase : Optional[int] = """
import os
"""
UpperCAmelCase : List[str] = """
def foo():
import os
return False
"""
UpperCAmelCase : Dict = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
UpperCAmelCase : Dict = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
UpperCAmelCase : str = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
UpperCAmelCase : Any = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
UpperCAmelCase : List[Any] = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
UpperCAmelCase : Any = """
import os
try:
import bar
except:
raise ValueError()
"""
UpperCAmelCase : int = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
UpperCAmelCase : List[Any] = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case" , SCREAMING_SNAKE_CASE )
def _A ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
a__ : int =os.path.join(SCREAMING_SNAKE_CASE , "test_file.py" )
with open(SCREAMING_SNAKE_CASE , "w" ) as _tmp_file:
_tmp_file.write(SCREAMING_SNAKE_CASE )
a__ : List[str] =get_imports(SCREAMING_SNAKE_CASE )
assert parsed_imports == ["os"]
| 95 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class MvpConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self , vocab_size=50267 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , use_prompt=False , prompt_length=100 , prompt_mid_dim=800 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , False ):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
                'The config can simply be saved and uploaded again to be fixed.' )
| 62 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxRobertaModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        self.model_tester = FlaxRobertaModelTester(self )
@slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('roberta-base' , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
| 96 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = GPTaTokenizer
    rust_tokenizer_class = GPTaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
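    # Note: with the toy merges above, "lower" (with a prefix space) tokenizes
    # as ["\u0120low", "er"]: the ranked merges build "\u0120low" via
    # "\u0120 l" -> "\u0120l o" -> "\u0120lo w", and "er" comes from "e r".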
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
def _a ( self ) -> List[Any]:
__UpperCamelCase =GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__UpperCamelCase ='lower newer'
__UpperCamelCase =['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
__UpperCamelCase =tokenizer.tokenize(A_ , add_prefix_space=A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =tokens + [tokenizer.unk_token]
__UpperCamelCase =[14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , A_ )
def _a ( self ) -> int:
if not self.test_rust_tokenizer:
return
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =self.get_rust_tokenizer(add_prefix_space=A_ )
__UpperCamelCase ='lower newer'
# Testing tokenization
__UpperCamelCase =tokenizer.tokenize(A_ , add_prefix_space=A_ )
__UpperCamelCase =rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
# Testing conversion to ids without special tokens
__UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ , add_prefix_space=A_ )
__UpperCamelCase =rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
# Testing conversion to ids with special tokens
__UpperCamelCase =self.get_rust_tokenizer(add_prefix_space=A_ )
__UpperCamelCase =tokenizer.encode(A_ , add_prefix_space=A_ )
__UpperCamelCase =rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
# Testing the unknown token
__UpperCamelCase =tokens + [rust_tokenizer.unk_token]
__UpperCamelCase =[14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A_ ) , A_ )
def _a ( self , *A_ , **A_ ) -> Optional[int]:
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def _a ( self , A_=15 ) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
# Simple input
__UpperCamelCase ='This is a simple input'
__UpperCamelCase =['This is a simple input 1', 'This is a simple input 2']
__UpperCamelCase =('This is a simple input', 'This is a pair')
__UpperCamelCase =[
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='max_length' )
# Simple input
self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='max_length' )
# Simple input
self.assertRaises(
A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='max_length' , )
# Pair input
self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='max_length' )
# Pair input
self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='max_length' )
# Pair input
self.assertRaises(
A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='max_length' , )
def _a ( self ) -> int:
__UpperCamelCase =GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
__UpperCamelCase ='This is a simple input'
__UpperCamelCase =['This is a simple input looooooooong', 'This is a simple input']
__UpperCamelCase =('This is a simple input', 'This is a pair')
__UpperCamelCase =[
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
__UpperCamelCase =tokenizer.pad_token_id
__UpperCamelCase =tokenizer(A_ , padding='max_length' , max_length=30 , return_tensors='np' )
__UpperCamelCase =tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors='np' )
__UpperCamelCase =tokenizer(*A_ , padding='max_length' , max_length=60 , return_tensors='np' )
__UpperCamelCase =tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase ='$$$'
__UpperCamelCase =GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=A_ , add_bos_token=A_ )
__UpperCamelCase ='This is a simple input'
__UpperCamelCase =['This is a simple input 1', 'This is a simple input 2']
__UpperCamelCase =tokenizer.bos_token_id
__UpperCamelCase =tokenizer(A_ )
__UpperCamelCase =tokenizer(A_ )
self.assertEqual(out_s.input_ids[0] , A_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__UpperCamelCase =tokenizer.decode(out_s.input_ids )
__UpperCamelCase =tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , A_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def _a ( self ) -> Optional[int]:
pass
def _a ( self ) -> Any:
# TODO: change to self.get_tokenizers() when the fast version is implemented
__UpperCamelCase =[self.get_tokenizer(do_lower_case=A_ , add_bos_token=A_ )]
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__UpperCamelCase ='Encode this.'
__UpperCamelCase ='This one too please.'
__UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ )
encoded_sequence += tokenizer.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode_plus(
A_ , A_ , add_special_tokens=A_ , return_special_tokens_mask=A_ , )
__UpperCamelCase =encoded_sequence_dict['input_ids']
__UpperCamelCase =encoded_sequence_dict['special_tokens_mask']
self.assertEqual(len(A_ ) , len(A_ ) )
__UpperCamelCase =[
(x if not special_tokens_mask[i] else None) for i, x in enumerate(A_ )
]
__UpperCamelCase =[x for x in filtered_sequence if x is not None]
self.assertEqual(A_ , A_ )
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Optional[Any]:
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
__UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=A_ )
__UpperCamelCase ='A photo of a cat'
__UpperCamelCase =tokenizer.encode(
A_ , )
self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('test_opt' )
__UpperCamelCase =AutoTokenizer.from_pretrained('./test_opt' )
__UpperCamelCase =tokenizer.encode(
A_ , )
self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] )
def _a ( self ) -> Dict:
__UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=A_ )
__UpperCamelCase ='A photo of a cat'
__UpperCamelCase =tokenizer.encode(
A_ , )
# Same as above
self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip('This test is failing because of a bug in the fast tokenizer' )
def _a ( self ) -> List[Any]:
__UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=A_ )
__UpperCamelCase ='bos'
__UpperCamelCase =tokenizer.get_vocab()['bos']
__UpperCamelCase ='A photo of a cat'
__UpperCamelCase =tokenizer.encode(
A_ , )
# We changed the bos token
self.assertEqual(A_ , [31957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('./tok' )
__UpperCamelCase =AutoTokenizer.from_pretrained('./tok' )
self.assertTrue(tokenizer.is_fast )
__UpperCamelCase =tokenizer.encode(
A_ , )
self.assertEqual(A_ , [31957, 250, 1345, 9, 10, 4758] )
| 62 | 0 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
__snake_case = logging.get_logger('''transformers.models.speecht5''')
def load_weights( checkpoint , hf_model , config ):
    '''Copy the fairseq HiFi-GAN generator weights into the SpeechT5HifiGan model.'''
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint['''input_conv.weight_g''']
    hf_model.conv_pre.weight_v.data = checkpoint['''input_conv.weight_v''']
    hf_model.conv_pre.bias.data = checkpoint['''input_conv.bias''']
    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[f'''upsamples.{i}.1.weight_g''']
        hf_model.upsampler[i].weight_v.data = checkpoint[f'''upsamples.{i}.1.weight_v''']
        hf_model.upsampler[i].bias.data = checkpoint[f'''upsamples.{i}.1.bias''']
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']
    hf_model.conv_post.weight_g.data = checkpoint['''output_conv.1.weight_g''']
    hf_model.conv_post.weight_v.data = checkpoint['''output_conv.1.weight_v''']
    hf_model.conv_post.bias.data = checkpoint['''output_conv.1.bias''']
    hf_model.remove_weight_norm()
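# Note: the stats file loaded below is assumed (as in the original fairseq
# recipe) to hold the mel-spectrogram normalization statistics -- row 0 the
# per-bin mean, row 1 the per-bin scale -- which become the model's
# mean/scale buffers.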
@torch.no_grad()
def convert_hifigan_checkpoint( checkpoint_path , stats_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    '''simple docstring'''
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechT5HifiGanConfig()
    model = SpeechT5HifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint['''model''']['''generator'''] , model , config )
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('''Pushing to the hub...''' )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
__snake_case = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
    )
| 97 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector :
"""simple docstring"""
def __init__( self , A_ = None ) -> None:
if components is None:
__UpperCamelCase =[]
__UpperCamelCase =list(A_ )
def __len__( self ) -> int:
return len(self.__components )
def __str__( self ) -> str:
return "(" + ",".join(map(A_ , self.__components ) ) + ")"
def __add__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] + other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else:
raise Exception('must have the same size' )
def __sub__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] - other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self , A_ ) -> Vector:
...
@overload
def __mul__( self , A_ ) -> float:
...
def __mul__( self , A_ ) -> float | Vector:
if isinstance(A_ , (float, int) ):
__UpperCamelCase =[c * other for c in self.__components]
return Vector(A_ )
elif isinstance(A_ , A_ ) and len(self ) == len(A_ ):
__UpperCamelCase =len(self )
__UpperCamelCase =[self.__components[i] * other.component(A_ ) for i in range(A_ )]
return sum(A_ )
else: # error case
raise Exception('invalid operand!' )
def _a ( self ) -> Vector:
return Vector(self.__components )
def _a ( self , A_ ) -> float:
if isinstance(A_ , A_ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def _a ( self , A_ , A_ ) -> None:
assert -len(self.__components ) <= pos < len(self.__components )
__UpperCamelCase =value
def _a ( self ) -> float:
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
__UpperCamelCase =[c**2 for c in self.__components]
return math.sqrt(sum(A_ ) )
def _a ( self , A_ , A_ = False ) -> float:
__UpperCamelCase =self * other
__UpperCamelCase =self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def zero_vector ( SCREAMING_SNAKE_CASE__ : int ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return Vector([0] * dimension )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ))
__UpperCamelCase =[0] * dimension
__UpperCamelCase =1
return Vector(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Vector , SCREAMING_SNAKE_CASE__ : Vector ):
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and (isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ))
)
return x * scalar + y
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
random.seed(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )]
return Vector(SCREAMING_SNAKE_CASE__ )
class Matrix :
"""simple docstring"""
def __init__( self , A_ , A_ , A_ ) -> None:
__UpperCamelCase =matrix
__UpperCamelCase =w
__UpperCamelCase =h
def __str__( self ) -> str:
__UpperCamelCase =''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] + other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
raise Exception('matrix must have the same dimension!' )
def __sub__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] - other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self , A_ ) -> Matrix:
...
@overload
def __mul__( self , A_ ) -> Vector:
...
def __mul__( self , A_ ) -> Vector | Matrix:
if isinstance(A_ , A_ ): # matrix-vector
if len(A_ ) == self.__width:
__UpperCamelCase =zero_vector(self.__height )
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] * other.component(A_ )
for j in range(self.__width )
]
ans.change_component(A_ , sum(A_ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(A_ , (int, float) ): # matrix-scalar
__UpperCamelCase =[
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(A_ , self.__width , self.__height )
return None
def _a ( self ) -> int:
return self.__height
def _a ( self ) -> int:
return self.__width
def _a ( self , A_ , A_ ) -> float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('change_component: indices out of bounds' )
def _a ( self , A_ , A_ , A_ ) -> None:
if 0 <= x < self.__height and 0 <= y < self.__width:
__UpperCamelCase =value
else:
raise Exception('change_component: indices out of bounds' )
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
__UpperCamelCase =self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(A_ ) ):
__UpperCamelCase =minor[i][:y] + minor[i][y + 1 :]
return Matrix(A_ , self.__width - 1 , self.__height - 1 ).determinant()
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(A_ , A_ )
else:
raise Exception('Indices out of bounds' )
def _a ( self ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__UpperCamelCase =[
self.__matrix[0][y] * self.cofactor(0 , A_ ) for y in range(self.__width )
]
return sum(A_ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =[[0] * n for _ in range(SCREAMING_SNAKE_CASE__ )]
return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
random.seed(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[
[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ )
]
return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
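# Illustrative usage of the classes above (only calls whose names are defined
# in this snippet are shown; the helper functions keep their dump names):
#   v = Vector([1, 2, 3]); w = Vector([4, 5, 6])
#   v * w                        # dot product -> 32
#   (v + w).euclidean_length()   # sqrt(25 + 49 + 81) ~ 12.45
#   m = Matrix([[2, 0], [0, 2]], 2, 2)
#   m * Vector([1, 2])           # matrix-vector product -> (2,4)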
| 62 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : List[Any] = logging.get_logger(__name__)
lowerCAmelCase__ : Tuple = {
'facebook/nllb-moe-54B': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json',
}
class NllbMoeConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Tuple ,lowerCamelCase__ : Optional[Any]=128_112 ,lowerCamelCase__ : List[str]=1_024 ,lowerCamelCase__ : Any=12 ,lowerCamelCase__ : Any=4_096 ,lowerCamelCase__ : Dict=16 ,lowerCamelCase__ : str=12 ,lowerCamelCase__ : Any=4_096 ,lowerCamelCase__ : str=16 ,lowerCamelCase__ : str=0.0_5 ,lowerCamelCase__ : Dict=0.0_5 ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : Dict="relu" ,lowerCamelCase__ : Any=1_024 ,lowerCamelCase__ : List[str]=0.1 ,lowerCamelCase__ : Optional[int]=0.1 ,lowerCamelCase__ : Tuple=0.0 ,lowerCamelCase__ : str=0.0_2 ,lowerCamelCase__ : Any=2 ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : str=False ,lowerCamelCase__ : Optional[int]="float32" ,lowerCamelCase__ : Dict=False ,lowerCamelCase__ : Dict=128 ,lowerCamelCase__ : int=64 ,lowerCamelCase__ : Tuple=4 ,lowerCamelCase__ : Any=4 ,lowerCamelCase__ : Dict=0.0_0_1 ,lowerCamelCase__ : List[str]=0.0_0_1 ,lowerCamelCase__ : Optional[Any]="all" ,lowerCamelCase__ : List[Any]=False ,lowerCamelCase__ : str=False ,lowerCamelCase__ : Optional[int]=1.0 ,lowerCamelCase__ : Union[str, Any]=0.2 ,lowerCamelCase__ : int=1 ,lowerCamelCase__ : List[Any]=0 ,lowerCamelCase__ : Optional[int]=2 ,lowerCamelCase__ : Optional[Any]=False ,**lowerCamelCase__ : List[Any] ,):
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = d_model
UpperCAmelCase__ = encoder_ffn_dim
UpperCAmelCase__ = encoder_layers
UpperCAmelCase__ = encoder_attention_heads
UpperCAmelCase__ = decoder_ffn_dim
UpperCAmelCase__ = decoder_layers
UpperCAmelCase__ = decoder_attention_heads
UpperCAmelCase__ = dropout
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = activation_dropout
UpperCAmelCase__ = activation_function
UpperCAmelCase__ = init_std
UpperCAmelCase__ = encoder_layerdrop
UpperCAmelCase__ = decoder_layerdrop
UpperCAmelCase__ = use_cache
UpperCAmelCase__ = encoder_layers
UpperCAmelCase__ = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase__ = router_z_loss_coef
UpperCAmelCase__ = router_aux_loss_coef
UpperCAmelCase__ = decoder_sparse_step
UpperCAmelCase__ = encoder_sparse_step
UpperCAmelCase__ = num_experts
UpperCAmelCase__ = expert_capacity
UpperCAmelCase__ = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
UpperCAmelCase__ = router_dtype
UpperCAmelCase__ = router_ignore_padding_tokens
UpperCAmelCase__ = batch_prioritized_routing
UpperCAmelCase__ = second_expert_policy
UpperCAmelCase__ = normalize_router_prob_before_dropping
UpperCAmelCase__ = moe_eval_capacity_token_fraction
UpperCAmelCase__ = moe_token_dropout
UpperCAmelCase__ = output_router_logits
super().__init__(
pad_token_id=lowerCamelCase__ ,bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,is_encoder_decoder=lowerCamelCase__ ,decoder_start_token_id=lowerCamelCase__ ,**lowerCamelCase__ ,)
| 98 |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort( graph : dict[int, list[int]] , vert : int , visited : list[bool] ):
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph , neighbour , visited )
    order.append(vert )
    return order


def find_components( reversed_graph : dict[int, list[int]] , vert : int , visited : list[bool] ):
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph , neighbour , visited )
    return component


def strongly_connected_components( graph : dict[int, list[int]] ):
    visited = len(graph ) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph ) )}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert )
    order = []
    for i, was_visited in enumerate(visited ):
        if not was_visited:
            order += topology_sort(graph , i , visited )
    components_list = []
    visited = len(graph ) * [False]
    for i in range(len(graph ) ):
        vert = order[len(graph ) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph , vert , visited )
            components_list.append(component )
    return components_list
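# Example run (Kosaraju's algorithm, using the second test graph above):
#   strongly_connected_components(test_graph_2)
# groups the two cycles {0, 1, 2} and {3, 4, 5}; the order of components and
# of vertices within each component depends on the DFS traversal order.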
| 62 | 0 |
def is_pentagonal( n : int ) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution( limit : int = 5000 ) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1 , limit )]
    for i, pentagonal_i in enumerate(pentagonal_nums ):
        for j in range(i , len(pentagonal_nums ) ):
            pentagonal_j = pentagonal_nums[j]
            pentagonal_sum = pentagonal_i + pentagonal_j
            pentagonal_diff = pentagonal_j - pentagonal_i
            if is_pentagonal(pentagonal_sum ) and is_pentagonal(pentagonal_diff ):
                return pentagonal_diff
    return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 99 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'vinai/bartpho-syllable': 1024}
class BartphoTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , A_ , A_ , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_ = None , **A_ , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
__UpperCamelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
__UpperCamelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
__UpperCamelCase =vocab_file
__UpperCamelCase =monolingual_vocab_file
__UpperCamelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A_ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
__UpperCamelCase ={}
__UpperCamelCase =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(A_ ) not in self.fairseq_tokens_to_ids:
__UpperCamelCase =cnt
cnt += 1
with open(A_ , 'r' , encoding='utf-8' ) as f:
for line in f.readlines():
__UpperCamelCase =line.strip().split()[0]
__UpperCamelCase =len(self.fairseq_tokens_to_ids )
if str(A_ ) not in self.fairseq_tokens_to_ids:
__UpperCamelCase =len(self.fairseq_tokens_to_ids )
__UpperCamelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Any:
__UpperCamelCase =self.__dict__.copy()
__UpperCamelCase =None
__UpperCamelCase =self.sp_model.serialized_model_proto()
return state
def __setstate__( self , A_ ) -> List[str]:
__UpperCamelCase =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase ={}
__UpperCamelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _a ( self , A_ , A_ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCamelCase =[self.cls_token_id]
__UpperCamelCase =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _a ( self , A_ , A_ = None , A_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def _a ( self , A_ , A_ = None ) -> List[int]:
__UpperCamelCase =[self.sep_token_id]
__UpperCamelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _a ( self ) -> Any:
return len(self.fairseq_ids_to_tokens )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase ={self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _a ( self , A_ ) -> List[str]:
return self.sp_model.encode(A_ , out_type=A_ )
def _a ( self , A_ ) -> str:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def _a ( self , A_ ) -> int:
return self.fairseq_ids_to_tokens[index]
def _a ( self , A_ ) -> List[Any]:
__UpperCamelCase =''.join(A_ ).replace(A_ , ' ' ).strip()
return out_string
def _a ( self , A_ , A_ = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCamelCase =os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase =os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ , 'wb' ) as fi:
__UpperCamelCase =self.sp_model.serialized_model_proto()
fi.write(A_ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
A_ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , A_ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(A_ , 'w' , encoding='utf-8' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'{str(A_ )} \n' )
return out_vocab_file, out_monolingual_vocab_file
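# Typical round trip (illustrative sketch; the model id comes from the
# PRETRAINED map above, the Vietnamese sample sentence is a hypothetical
# input):
#   tok = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
#   ids = tok("Chúng tôi là những nghiên cứu viên.")["input_ids"]
#   tok.decode(ids)  # restores the text, modulo sentencepiece normalization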
| 62 | 0 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''detr'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=3 , lowerCAmelCase__=1_0_0 , lowerCAmelCase__=6 , lowerCAmelCase__=2_0_4_8 , lowerCAmelCase__=8 , lowerCAmelCase__=6 , lowerCAmelCase__=2_0_4_8 , lowerCAmelCase__=8 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=True , lowerCAmelCase__="relu" , lowerCAmelCase__=2_5_6 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1.0 , lowerCAmelCase__=False , lowerCAmelCase__="sine" , lowerCAmelCase__="resnet50" , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=1 , lowerCAmelCase__=5 , lowerCAmelCase__=2 , lowerCAmelCase__=1 , lowerCAmelCase__=1 , lowerCAmelCase__=5 , lowerCAmelCase__=2 , lowerCAmelCase__=0.1 , **lowerCAmelCase__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""")
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""")
__SCREAMING_SNAKE_CASE = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""])
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = backbone_config.get("""model_type""")
__SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type]
__SCREAMING_SNAKE_CASE = config_class.from_dict(lowerCAmelCase__)
# set timm attributes to None
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None, None, None
__SCREAMING_SNAKE_CASE = use_timm_backbone
__SCREAMING_SNAKE_CASE = backbone_config
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = num_queries
__SCREAMING_SNAKE_CASE = d_model
__SCREAMING_SNAKE_CASE = encoder_ffn_dim
__SCREAMING_SNAKE_CASE = encoder_layers
__SCREAMING_SNAKE_CASE = encoder_attention_heads
__SCREAMING_SNAKE_CASE = decoder_ffn_dim
__SCREAMING_SNAKE_CASE = decoder_layers
__SCREAMING_SNAKE_CASE = decoder_attention_heads
__SCREAMING_SNAKE_CASE = dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = activation_function
__SCREAMING_SNAKE_CASE = init_std
__SCREAMING_SNAKE_CASE = init_xavier_std
__SCREAMING_SNAKE_CASE = encoder_layerdrop
__SCREAMING_SNAKE_CASE = decoder_layerdrop
__SCREAMING_SNAKE_CASE = encoder_layers
__SCREAMING_SNAKE_CASE = auxiliary_loss
__SCREAMING_SNAKE_CASE = position_embedding_type
__SCREAMING_SNAKE_CASE = backbone
__SCREAMING_SNAKE_CASE = use_pretrained_backbone
__SCREAMING_SNAKE_CASE = dilation
# Hungarian matcher
__SCREAMING_SNAKE_CASE = class_cost
__SCREAMING_SNAKE_CASE = bbox_cost
__SCREAMING_SNAKE_CASE = giou_cost
# Loss coefficients
__SCREAMING_SNAKE_CASE = mask_loss_coefficient
__SCREAMING_SNAKE_CASE = dice_loss_coefficient
__SCREAMING_SNAKE_CASE = bbox_loss_coefficient
__SCREAMING_SNAKE_CASE = giou_loss_coefficient
__SCREAMING_SNAKE_CASE = eos_coefficient
super().__init__(is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__)
@property
def snake_case_ ( self):
return self.encoder_attention_heads
@property
def snake_case_ ( self):
return self.d_model
@classmethod
def snake_case_ ( cls , lowerCAmelCase__ , **lowerCAmelCase__):
return cls(backbone_config=lowerCAmelCase__ , **lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__)
if output["backbone_config"] is not None:
__SCREAMING_SNAKE_CASE = self.backbone_config.to_dict()
__SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
class DetrOnnxConfig( OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
def snake_case_ ( self):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
])
@property
def snake_case_ ( self):
return 1E-5
@property
def snake_case_ ( self):
return 1_2
| 100 |
from numpy import exp, pi, sqrt
def gaussian ( x , mu : float = 0.0 , sigma : float = 1.0 ):
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
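    # Quick sanity checks (values rounded):
    #   gaussian(0) == 1 / sqrt(2 * pi) ~ 0.3989, the peak of the standard normal
    #   gaussian(1) ~ 0.2420, and gaussian(0, sigma=2) halves the peak to ~ 0.1995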
| 62 | 0 |